prompt
listlengths
1
1
compression_prompt
listlengths
1
1
target
stringlengths
1.03k
828k
[ { "content": "Produce an exact reconstruction of the code:\n```python\n#!/usr/bin/env python\n\n# pylint: disable=broad-except\n\nimport re\nimport json\nimport time\n\nfrom reporting.utilities import getLogger, get_hostname\nfrom reporting.exceptions import InputDataError\n\nlog = getLogger(__name__)\n\nclass ...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n# pylint: disable=broad-except\n\nimport re\nimport json\nimport time\n\nfrom reporting.utilities import getLogger, get_hostname\nfrom reporting.exceptions import InputDataError\n\nlog = getLogger(__n...
```python #!/usr/bin/env python # pylint: disable=broad-except import re import json import time from reporting.utilities import getLogger, get_hostname from reporting.exceptions import InputDataError log = getLogger(__name__) class IParser(object): def parse(self): assert 0, "This method must be defined." class MatchParser(IParser): def __init__(self, pattern, transform): self.__pattern=pattern self.__transform=transform def parse(self, data): log.debug("match %s" % self.__pattern) match_obj = re.match(self.__pattern, data, re.M|re.I) match_groups = match_obj.groups() result = self.__transform.format(*match_groups) output = json.loads(result) output['timestamp'] = int(time.time()) output['hostname'] = get_hostname() return output class SplitParser(IParser): def __init__(self, delimiter, transform): self.__delimiter=delimiter self.__transform=transform def parse(self, data): log.debug("delimiter %s" % self.__delimiter) list = [item.strip() for item in re.split(self.__delimiter, data)] result = self.__transform.format(*list) log.debug("result %s"%result) output = json.loads(result) log.debug("output %s"%output) output['timestamp'] = int(time.time()) output['hostname'] = get_hostname() return output class DummyParser(IParser): def parse(self, data): output = {} output['timestamp'] = int(time.time()) output['hostname'] = get_hostname() output['content'] = data.strip() return output class JsonGrepParser(IParser): def __init__(self, pattern=None, list_name="list"): self.__pattern=pattern self.__list_name=list_name def parse(self, data): output = {} output['timestamp'] = int(time.time()) output['hostname'] = get_hostname() try: j = json.loads(data) except ValueError as e: log.exception('Could not load JSON object from input data.') raise InputDataError() if self.__pattern and len(self.__pattern)>0: self.jsongrep(j, map(re.compile, self.__pattern.split(" ")), output) if self.__list_name in output and len(output[self.__list_name])==1: 
output[self.__list_name]=output[self.__list_name][0] else: if isinstance(j, list): output[self.__list_name]=j else: output.update(j) return output # from http://blogs.fluidinfo.com/terry/2010/11/25/jsongrep-py-python-for-extracting-pieces-of-json-objects/ def jsongrep(self, d, patterns, output): #print "jsongrep %s" % d try: pattern = patterns.pop(0) except IndexError: if isinstance(d, dict): output.update(d) else: self.add_value(d, output) else: if isinstance(d, dict): #print "dict" keys = filter(pattern.match, d.keys()) elif isinstance(d, list): #print "lsit" keys = map(int, filter(pattern.match, ['%d' % i for i in range(len(d))])) #print keys else: #print "str "+str(d) if pattern.match(str(d)): self.add_value(d, output) return for item in (d[key] for key in keys): self.jsongrep(item, patterns[:], output) def add_value(self, d, output): if not self.__list_name in output: output[self.__list_name]=[] #print output if isinstance(d, list): output[self.__list_name].extend(d) else: output[self.__list_name].append(d) ```
[ { "content": "Here is the script:\n```python\n#!/usr/bin/env python\n###############################################################################\n# $Id: elas.py 32163 2015-12-13 17:44:50Z goatbar $\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: Test ELAS driver\n# Author: Even Rouault, <even dot rouault...
[ { "content": "Here is the script:\n<|memory_start|>```python\n#!/usr/bin/env python\n###############################################################################\n# $Id: elas.py 32163 2015-12-13 17:44:50Z goatbar $\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: Test ELAS driver\n# Author: Even Rouault, <...
```python #!/usr/bin/env python ############################################################################### # $Id: elas.py 32163 2015-12-13 17:44:50Z goatbar $ # # Project: GDAL/OGR Test Suite # Purpose: Test ELAS driver # Author: Even Rouault, <even dot rouault at mines dash paris dot org> # ############################################################################### # Copyright (c) 2009, Even Rouault <even dot rouault at mines-paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
############################################################################### import sys sys.path.append( '../pymod' ) import gdaltest ############################################################################### # Test a dataset generated by Create() def elas_1(): tst = gdaltest.GDALTest( 'ELAS', 'byte_elas.bin', 1, 4672 ) return tst.testOpen() ############################################################################### # Test Create() def elas_2(): tst = gdaltest.GDALTest( 'ELAS', 'byte_elas.bin', 1, 4672 ) return tst.testCreate() gdaltest_list = [ elas_1, elas_2 ] if __name__ == '__main__': gdaltest.setup_run( 'elas' ) gdaltest.run_tests( gdaltest_list ) gdaltest.summarize() ```
[ { "content": "```python\nimport email\r\nimport imaplib\r\nimport json\r\n\r\nfrom input.imap.connection import open_connection\r\n\r\nfrom input.imap.header import get_mail_header\r\nfrom util.read_utils import lines_per_n\r\n\r\n\r\ndef get_unavailable_uid():\r\n \"\"\"\r\n This function returns a list ...
[ { "content": "<|memory_start|>```python\nimport email\r\nimport imaplib\r\nimport json\r\n\r\nfrom input.imap.connection import open_connection\r\n\r\nfrom input.imap.header import get_mail_header\r\nfrom util.read_utils import lines_per_n\r\n\r\n\r\ndef get_unavailable_uid():\r\n \"\"\"\r\n This function...
```python import email import imaplib import json from input.imap.connection import open_connection from input.imap.header import get_mail_header from util.read_utils import lines_per_n def get_unavailable_uid(): """ This function returns a list of UIDs that are not available in the IMAP server :return: List containing the UIDs not available in the IMAP server """ imaplib._MAXLINE = 800000 conn = open_connection() conn.select('INBOX') search_str = 'UID ' + '1:*' retcode, uids = conn.uid('SEARCH', None, search_str) available_uid = [] for uid in uids[0].split(): available_uid.append(int(uid)) try: conn.close() except: pass conn.logout() return set(range(min(available_uid), max(available_uid)+1)) - set(available_uid) # This list stores the UIDs of mails that have duplicate entries in the JSON file. duplicate_uid = set() # This set stores the UIDs of mails that don't have an entry in the JSON file - UIDs are consecutive numbers. missing_uid = set() # This list stores the UIDs of mails that have entries with insufficient entries in the JSON file. invalid_uid = set() # This set stores the UIDs of mails that are not forwarded from LKML subscription which is stored in a text file. unwanted_uid = set() # This set stores the UIDs for which corresponding mails are not available in the IMAP server. unavailable_uid = set() last_uid_read = 0 def check_validity(check_unavailable_uid='False', json_header_filename='headers.json'): """ This function checks for and prints duplicate, missing, and invalid objects in the "headers.json" file. This function can be run first to generate a list of duplicate, missing, or invalid objects' UIDs which can then be used to add or remove their entries from the JSON file. :return: Last UID that was checked by the function. """ previous_uid = 0 # The "read_uid" set is used to keep track of all the UIDs that have been read from the JSON file. # In case a duplicate exists, it would be read twice and hence would fail the set membership test. 
read_uid = set([]) # This variable contains the last UID that was checked. This variable is returned by the function. last_valid_uid = 0 header_attrib = {'Message-ID', 'From', 'To', 'Cc', 'In-Reply-To', 'Time'} # Read UIDs of mails that are not forwarded from LKML subscription which is stored in a text file. with open(json_header_filename, 'r') as json_file: for chunk in lines_per_n(json_file, 9): try: json_obj = json.loads(chunk) except: print("Unreadable JSON object after UID: " + str(previous_uid)) break # Checking for duplicate objects if not json_obj['Message-ID'] in read_uid: read_uid.add(json_obj['Message-ID']) else: duplicate_uid.add(json_obj['Message-ID']) # Check if the JSON object has sufficient attributes by checking if "header_attrib" is a subset of its keys if not set(header_attrib) <= json_obj.keys() or json_obj['Time'] is None: invalid_uid.add(json_obj['Message-ID']) # Check if it is a mail that is sent directly to "lkml.subscriber@gmail.com", in which caseit has not been # forwarded from the LKML subscription. if json_obj['To'] == "lkml.subscriber@gmail.com": unwanted_uid.add(json_obj['Message-ID']) previous_uid = json_obj['Message-ID'] # Calculate the missing UIDs by performing a set difference on all the UIDs possible till the highest UID read # from the actual UIDs that have been read. 
if previous_uid != 0: global last_uid_read last_uid_read = max(read_uid) global missing_uid missing_uid = set(range(min(read_uid), last_uid_read+1)) - read_uid global unavailable_uid if check_unavailable_uid: unavailable_uid = get_unavailable_uid() print("Unavailable UIDs: ", unavailable_uid if len(unavailable_uid) > 0 else "None") with open("unwanted_uid.txt", 'a') as unw_file: for uid in unwanted_uid: unw_file.write(str(uid) + '\n') print("Unwanted UIDs: ", unwanted_uid if len(unwanted_uid) > 0 else "None") print("Duplicate UIDs: ", duplicate_uid if len(duplicate_uid) > 0 else "None") print("Missing UIDs: ", missing_uid if len(missing_uid) > 0 else "None") print("Invalid UIDs: ", invalid_uid if len(invalid_uid) > 0 else "None") return last_uid_read def remove_unwanted_headers(to_remove=unwanted_uid, json_header_filename='headers.json'): """ This function removes all the UIDs specified in the to_remove parameter. By default, it removes all the unwanted entries in the JSON file, i.e. the list of UIDs of mails that are not forwarded from LKML subscription. :param to_remove: A list of UIDs that need to be removed. Default value is the list of unwanted mails' UIDs """ if len(to_remove) > 0: print("Removing unwanted headers...") # This list contains a list of JSON objects that need to be written to file write_to_file = [] with open(json_header_filename, 'r') as json_file: for chunk in lines_per_n(json_file, 9): json_obj = json.loads(chunk) if not json_obj['Message-ID'] in unwanted_uid: write_to_file.append(json_obj) with open(json_header_filename, 'w') as json_file: for json_obj in write_to_file: json.dump(json_obj, json_file, indent=1) json_file.write("\n") def remove_duplicate_headers(to_remove=duplicate_uid, json_header_filename='headers.json'): """ This function removes all the duplicate entries of the UIDs specified in the to_remove parameter. By default, it removes all the duplicate entries in the JSON file. 
:param to_remove: A list of UIDs that need to be removed. Default value is the list of duplicate mails' UIDs. """ # The "read_uid" set is used to keep track of all the UIDs that have been read from the JSON file. # In case a duplicate exists, it would be read twice and hence would fail the set membership test. read_uid = set([]) if len(to_remove) > 0: print("Removing duplicate headers...") # This list contains a list of JSON objects that need to be written to file write_to_file = [] with open(json_header_filename, 'r') as json_file: for chunk in lines_per_n(json_file, 9): json_obj = json.loads(chunk) if not json_obj['Message-ID'] in read_uid: write_to_file.append(json_obj) read_uid.add(json_obj['Message-ID']) with open(json_header_filename, 'w') as json_file: for json_obj in write_to_file: json.dump(json_obj, json_file, indent=1) json_file.write("\n") def add_missing_headers(to_add=missing_uid, unwanted_uid_filename="unwanted_uid.txt"): """ This function adds the mails that have been missed out, considering the fact that UIDs are consecutive. If a mail that is missing in the JSON file is not available or has been deleted, this function ignores that UID. :param to_add: A list of UIDs that need to be added. Default value is the list of missing mails' UIDs. """ # To prevent replacement of mails that are not forwarded from the LKML subscription: with open(unwanted_uid_filename, 'r') as unw_file: for line in unw_file: unwanted_uid.add(int(line.strip())) to_add = [x for x in to_add if x not in unwanted_uid] # To prevent attempts to replace mails are known to be not available in the IMAP server: to_add = [x for x in to_add if x not in unavailable_uid] if len(to_add) > 0: print("Fetching missing headers...") get_mail_header(to_add, False) def replace_invalid_headers(to_replace=invalid_uid, json_header_filename="headers.json"): """ This function removes the mail headers that have insufficient attributes and fetches those headers again. 
If an attribute is missing in the original mail header or if the mail has been deleted, this function ignores that UID. :param to_replace: A list of UIDs that need to be replaced. Default value is the list of invalid mails' UIDs. """ if len(to_replace) > 0: print("Replacing invalid headers...") # This list contains a list of JSON objects that need to be written to file write_to_file = [] with open(json_header_filename, 'r') as json_file: for chunk in lines_per_n(json_file, 9): json_obj = json.loads(chunk) if not json_obj['Message-ID'] in invalid_uid: write_to_file.append(json_obj) with open(json_header_filename, 'w') as json_file: for json_obj in write_to_file: json.dump(json_obj, json_file, indent=1) json_file.write("\n") add_missing_headers(to_replace) def write_uid_map(from_index=1, to_index=last_uid_read, uid_map_filename="thread_uid_map.json"): """ To ensure that references are correctly recorded in the JSON file such that there are no references to mails that do not exist and to ease the processing of headers, a map with the string in the Message-Id field of the header to the UID of the mail is required. This function fetches the headers from the IMAP server and adds the required pairs of Message_ID and UID to the JSON file. :param from_index: Fetches headers from this UID onwards. :param to_index: Fetches headers till this UID (non inclusive). """ with open(uid_map_filename, 'r') as map_file: uid_msg_id_map = json.load(map_file) map_file.close() to_get = list(range(from_index, to_index)) imaplib._MAXLINE = 800000 conn = open_connection() try: conn.select('INBOX') for num in to_get: # conn.uid() converts the arguments provided to an IMAP command to fetch the mail using the UID sepcified by num # Uncomment the line below to fetch the entire message rather than just the mail headers. 
# typ, msg_header = conn.uid('FETCH', num, '(RFC822)') typ, msg_header = conn.uid('FETCH', str(num), '(RFC822.HEADER)') for response_part in msg_header: if isinstance(response_part, tuple): print("Processing mail #", num) # "response_part" contains the required info as a byte stream. # This has to be converted to a message stream using the email module original = email.message_from_bytes(response_part[1]) # The splicing is done as to remove the '<' and '>' from the message-id string uid_msg_id_map[original['Message-ID'][1:-1]] = num finally: try: conn.close() except: pass conn.logout() with open(uid_map_filename, mode='w', encoding='utf-8') as f: json.dump(uid_msg_id_map, f, indent=1) f.close() ```
[ { "content": "```python\nfrom gobjcreator3.codegen.code_generator import CodeGenerator\nfrom gobjcreator3.codegen.output import StdOut\nfrom gobjcreator3.codegen.name_creator import NameCreator\nfrom gobjcreator3.codegen.c_marshaller_generator import CMarshallerGenerator, CMarshallerNameCreator\nfrom gobjcreato...
[ { "content": "<|memory_start|>```python\nfrom gobjcreator3.codegen.code_generator import CodeGenerator\nfrom gobjcreator3.codegen.output import StdOut\nfrom gobjcreator3.codegen.name_creator import NameCreator\nfrom gobjcreator3.codegen.c_marshaller_generator import CMarshallerGenerator, CMarshallerNameCreator\...
```python from gobjcreator3.codegen.code_generator import CodeGenerator from gobjcreator3.codegen.output import StdOut from gobjcreator3.codegen.name_creator import NameCreator from gobjcreator3.codegen.c_marshaller_generator import CMarshallerGenerator, CMarshallerNameCreator from gobjcreator3.model.type import Type from gobjcreator3.model.visibility import Visibility from gobjcreator3.model.method import Parameter from gobjcreator3.model.property import PropType, PropAccess from gobjcreator3.model.ginterface import GInterface import os import re import faberscriptorum class CGenConfig(object): def __init__(self): self.generate_base_functions = False self.generate_constructor = False self.generate_setter_getter = False self.verbose = False self.header_text_file = "" self.directory_per_module = True class CCodeGenerator(CodeGenerator): def __init__(self, root_module, origin, out=StdOut(), config=CGenConfig()): CodeGenerator.__init__(self, root_module, origin, out) self._config = config self._dir_stack = [] self._cur_dir = "" self._name_creator = NameCreator() self._template_dir = os.path.dirname(__file__) + os.sep + "templates" + os.sep + "c" self._refresh_template_processor() self._regex_type_w_ptrs = re.compile(r"(\w+)(\s*)(\*+)") def generate(self): self._generate_module(self._root_module) def _generate_module(self, module): if self._config.directory_per_module: if self._cur_dir: self._cur_dir += os.sep + module.name else: self._cur_dir = module.name self._dir_stack.append(self._cur_dir) self._out.enter_dir(self._cur_dir) for m in module.modules: self._generate_module(m) self._refresh_template_processor() self._setup_module_symbols(module) objs = [obj for obj in module.objects if obj.filepath_origin == self._origin] for obj in objs: self._setup_gobject_symbols(obj) self._gen_object_header(obj) self._gen_object_prot_header(obj) self._gen_object_source(obj) if obj.has_signals(): self._gen_object_marshallers(obj) intfs = [intf for intf in module.interfaces if 
intf.filepath_origin == self._origin] for intf in intfs: self._setup_ginterface_symbols(intf) self._gen_interface_header(intf) self._gen_interface_source(intf) if intf.signals: self._gen_object_marshallers(intf) enums = [enum for enum in module.enumerations if enum.filepath_origin == self._origin] for enum in enums: self._setup_genum_symbols(enum) self._gen_enum_header(enum) self._gen_enum_source(enum) all_flags = [flags for flags in module.flags if flags.filepath_origin == self._origin] for flags in all_flags: self._setup_gflags_symbols(flags) self._gen_flags_header(flags) self._gen_flags_source(flags) error_domains = [error_domain for error_domain in module.error_domains if error_domain.filepath_origin == self._origin] for error_domain in error_domains: self._setup_gerror_symbols(error_domain) self._gen_error_header(error_domain) if self._config.directory_per_module: self._out.exit_dir(self._cur_dir) self._dir_stack.pop() if self._dir_stack: self._cur_dir = self._dir_stack[-1] else: self._cur_dir = "" def _gen_object_header(self, obj): file_path = self._full_path(self._name_creator.create_obj_header_name(obj)) lines = self._get_lines_from_template("gobject_header.template", file_path) self._create_text_file(file_path, lines) def _gen_object_prot_header(self, obj): if not obj.has_protected_members() and obj.is_final: return file_path = self._full_path(self._name_creator.create_obj_prot_header_name(obj)) lines = self._get_lines_from_template("gobject_header_prot.template", file_path) self._create_text_file(file_path, lines) def _gen_object_source(self, obj): file_path = self._full_path(self._name_creator.create_obj_source_name(obj)) lines = self._get_lines_from_template("gobject_source.template", file_path) self._create_text_file(file_path, lines) def _gen_interface_header(self, intf): file_path = self._full_path(self._name_creator.create_obj_header_name(intf)) lines = self._get_lines_from_template("ginterface_header.template", file_path) 
self._create_text_file(file_path, lines) def _gen_interface_source(self, intf): file_path = self._full_path(self._name_creator.create_obj_source_name(intf)) lines = self._get_lines_from_template("ginterface_source.template", file_path) self._create_text_file(file_path, lines) def _gen_object_marshallers(self, clif): is_interface = isinstance(clif, GInterface) header_guard = "__" modprefix = self._template_processor.getSymbol("MODULE_PREFIX") if modprefix: header_guard += modprefix + "_" if not is_interface: header_guard += self._template_processor.getSymbol("CLASS_NAME") else: header_guard += self._template_processor.getSymbol("INTF_NAME") header_guard += "_MARSHALLER_H__" if not is_interface: prefix = self._template_processor.getSymbol("class_prefix") else: prefix = self._template_processor.getSymbol("intf_prefix") signals = clif.get_signals() generator = CMarshallerGenerator( self._header_comment(), header_guard, prefix, signals, self._out ) header_file_path = self._full_path(self._name_creator.create_obj_marshaller_header_name(clif)) if self._config.verbose: print("generating %s..." % header_file_path, end="") generator.generate_header(header_file_path) if self._config.verbose: print("done") source_file_path = self._full_path(self._name_creator.create_obj_marshaller_source_name(clif)) if self._config.verbose: print("generating %s..." 
% source_file_path, end="") generator.generate_source(source_file_path) if self._config.verbose: print("done") def _gen_enum_header(self, enum): file_path = self._full_path(self._name_creator.create_filename_wo_suffix(enum) + ".h") lines = self._get_lines_from_template("genum_header.template", file_path) self._create_text_file(file_path, lines) def _gen_enum_source(self, enum): file_path = self._full_path(self._name_creator.create_filename_wo_suffix(enum) + ".c") lines = self._get_lines_from_template("genum_source.template", file_path) self._create_text_file(file_path, lines) def _gen_flags_header(self, flags): file_path = self._full_path(self._name_creator.create_filename_wo_suffix(flags) + ".h") lines = self._get_lines_from_template("gflags_header.template", file_path) self._create_text_file(file_path, lines) def _gen_flags_source(self, flags): file_path = self._full_path(self._name_creator.create_filename_wo_suffix(flags) + ".c") lines = self._get_lines_from_template("gflags_source.template", file_path) self._create_text_file(file_path, lines) def _gen_error_header(self, error_domain): file_path = self._full_path(self._name_creator.create_filename_wo_suffix(error_domain) + ".h") lines = self._get_lines_from_template("gerror_header.template", file_path) self._create_text_file(file_path, lines) def _full_path(self, basename): if self._cur_dir: return self._cur_dir + os.sep + basename else: return basename def _create_text_file(self, file_path, lines): if self._config.verbose: print("generating %s..." 
% file_path, end="") self._out.visit_text_file(file_path, lines) if self._config.verbose: print("done") def _get_lines_from_template(self, template_file, file_path): self._out.prepare_file_creation(file_path, self._template_processor) template_path = self._template_dir + os.sep + template_file template_path = os.path.abspath(template_path) out_buffer = self._template_processor.createStringOut() self._template_processor.createCode(template_path, out_buffer) lines = out_buffer.content.split(os.linesep) # Remove adjacent empty lines: res = [] prev = None for line in lines: line = line.rstrip() if line: res.append(line) else: if prev is None or prev: res.append(line) prev = line return res def _refresh_template_processor(self): self._template_processor = faberscriptorum.API() self._template_processor.setEditableSectionStyle(self._template_processor.Language.C) self._template_processor.setIncludePath([self._template_dir]) self._template_processor["header_comment"] = self._header_comment() self._template_processor["config"] = self._config self._template_processor["TRUE"] = True self._template_processor["FALSE"] = False self._template_processor["PUBLIC"] = Visibility.PUBLIC self._template_processor["PROTECTED"] = Visibility.PROTECTED self._template_processor["PRIVATE"] = Visibility.PRIVATE self._template_processor["OBJECT"] = Type.OBJECT self._template_processor["INTERFACE"] = Type.INTERFACE self._template_processor["type_name"] = self._name_creator.create_full_type_name self._template_processor["TYPE_MACRO"] = self._name_creator.create_type_macro self._template_processor["CAST_MACRO"] = self._name_creator.create_cast_macro self._template_processor["increment"] = self._increment self._template_processor["is_empty"] = self._is_empty self._template_processor["is_none"] = self._is_none self._template_processor["literal_trim"] = self._literal_trim self._template_processor["length"] = self._length self._template_processor["to_upper"] = self._to_upper 
self._template_processor["to_lower"] = self._to_lower self._template_processor["rearrange_asterisk"] = self._rearrange_asterisk self._template_processor["method_basename"] = self._method_basename self._template_processor["method_result"] = self._method_result self._template_processor["method_signature"] = self._method_signature self._template_processor["method_signature_by_name"] = self._method_signature_by_name self._template_processor["method_by_name"] = self._method_by_name self._template_processor["method_call_args"] = self._method_call_args self._template_processor["method_def_class"] = self._method_def_class self._template_processor["method_def_class_cast"] = self._method_def_class_cast def _setup_module_symbols(self, module): camel_case_prefix = module.name.capitalize() curmod = module while curmod.module: curmod = curmod.module if curmod.name: camel_case_prefix = curmod.name.capitalize() + camel_case_prefix prefix = self._name_creator.replace_camel_case(camel_case_prefix, "_") self._template_processor["module_prefix"] = self._module_prefix(module) self._template_processor["MODULE_PREFIX"] = prefix.upper() self._template_processor["ModulePrefix"] = camel_case_prefix self._template_processor["filename_wo_suffix"] = self._name_creator.create_filename_wo_suffix def _setup_gobject_symbols(self, obj): self._template_processor["class"] = obj self._template_processor["ClassName"] = obj.name self._template_processor["CLASS_NAME"] = self._name_creator.replace_camel_case(obj.name, "_").upper() self._template_processor["FullClassName"] = self._template_processor.getSymbol("ModulePrefix") + obj.name prefix = obj.cfunc_prefix or self._name_creator.replace_camel_case(obj.name, "_").lower() module_prefix = self._template_processor.getSymbol("module_prefix") if module_prefix: prefix = module_prefix + "_" + prefix self._template_processor["class_prefix"] = prefix self._template_processor["protected_header"] = self._name_creator.create_obj_prot_header_name 
self._template_processor["marshaller_header"] = self._name_creator.create_obj_marshaller_header_name self._template_processor["hasProtectedMembers"] = obj.has_protected_members() self._template_processor["PROP_NAME"] = self._name_creator.create_property_enum_value self._template_processor["prop_tech_name"] = self._name_creator.create_property_tech_name self._template_processor["PropType"] = PropType self._template_processor["PropAccess"] = PropAccess self._template_processor["prop_value"] = self._property_value self._template_processor["prop_gtype"] = self._property_gtype self._template_processor["prop_flags"] = self._property_flags self._template_processor["prop_setter_section"] = self._property_setter_section self._template_processor["prop_getter_section"] = self._property_getter_section self._template_processor["prop_set_section"] = self._property_setter_section self._template_processor["prop_get_section"] = self._property_getter_section self._template_processor["is_prop_init_required"] = self._is_property_init_required self._template_processor["signal_tech_name"] = self._signal_technical_name self._template_processor["signal_section_defhandler"] = self._signal_section_defhandler if obj.has_signals(): self._marshaller_names = CMarshallerNameCreator(prefix) self._template_processor["marshaller_func"] = self._marshaller_names.create_marshaller_name else: self._marshaller_names = None self._template_processor["interface_impl_funcname"] = self._interface_impl_funcname def _setup_ginterface_symbols(self, intf): self._template_processor["intf"] = intf self._template_processor["INTF_NAME"] = self._name_creator.replace_camel_case(intf.name, "_").upper() prefix = intf.cfunc_prefix or self._name_creator.replace_camel_case(intf.name, "_").lower() module_prefix = self._template_processor.getSymbol("module_prefix") if module_prefix: prefix = module_prefix + "_" + prefix self._template_processor["intf_prefix"] = prefix if intf.signals: self._marshaller_names = 
CMarshallerNameCreator(prefix) self._template_processor["marshaller_func"] = self._marshaller_names.create_marshaller_name else: self._marshaller_names = None def _setup_genum_symbols(self, enum): self._template_processor["enum"] = enum self._template_processor["ENUM_NAME"] = self._name_creator.replace_camel_case(enum.name, "_").upper() self._template_processor["FullEnumName"] = self._template_processor.getSymbol("ModulePrefix") + enum.name prefix = self._name_creator.replace_camel_case(enum.name, "_").lower() module_prefix = self._template_processor.getSymbol("module_prefix") if module_prefix: prefix = module_prefix + "_" + prefix self._template_processor["enum_prefix"] = prefix def _setup_gflags_symbols(self, flags): self._template_processor["flags"] = flags prefix = self._name_creator.replace_camel_case(flags.name, "_").lower() module_prefix = self._template_processor.getSymbol("module_prefix") if module_prefix: prefix = module_prefix + "_" + prefix self._template_processor["flags_prefix"] = prefix def _setup_gerror_symbols(self, error_domain): self._template_processor["error_domain"] = error_domain prefix = self._name_creator.replace_camel_case(error_domain.name, "_").lower() module_prefix = self._template_processor.getSymbol("module_prefix") if module_prefix: prefix = module_prefix + "_" + prefix self._template_processor["error_domain_prefix"] = prefix def _header_comment(self): if not self._config.header_text_file: return """/* * This file has been automatically generated by GObjectCreator3 * (see https://github.com/ThomasBollmeier/GObjectCreator3 for details) */ """ else: res = "" f = open(self._config.header_text_file) lines = f.readlines() f.close for line in lines: res += line return res def _increment(self, value): return value + 1 def _is_empty(self, data): return bool(data) == False def _is_none(self, data): return data is None def _to_upper(self, text): return text.upper() def _to_lower(self, text): return text.lower() def _literal_trim(self, text): 
if len(text) > 2: return text[1:-1] else: return "" def _length(self, data): try: return len(data) except TypeError as error: raise error def _method_result(self, method): result_type = "void" for p in method.parameters: type_name = self._name_creator.create_full_type_name(p.type) if isinstance(p.type, Type) and ( p.type.category == Type.OBJECT or p.type.category == Type.INTERFACE ): type_name += "*" if "const" in p.modifiers: type_name = "const " + type_name if p.direction == Parameter.OUT: result_type = type_name break return self._rearrange_asterisk(result_type) def _method_basename(self, cls, method_info ): method_or_name, intf = method_info if not isinstance(method_or_name, str): res = method_or_name.name else: res = method_or_name if intf: method_prefix = intf.cfunc_prefix or intf.name.lower() mod_prefix = self._module_prefix_relative(intf.module, cls.module) if mod_prefix: method_prefix = mod_prefix + "_" + method_prefix res = method_prefix + "_" + res return res def _method_signature(self, cls, method, suppress_param_names=False, insert_line_breaks=True, indent_level=1, instance_name="self" ): res = "" params = [] for p in method.parameters: type_name = self._name_creator.create_full_type_name(p.type) if isinstance(p.type, Type) and ( p.type.category == Type.OBJECT or p.type.category == Type.INTERFACE ): type_name += "*" if "const" in p.modifiers: type_name = "const " + type_name if p.direction != Parameter.OUT: params.append((type_name, p.name)) if not method.is_static: cls_type = self._name_creator.create_full_type_name(cls) params.insert(0, (cls_type + "*", instance_name)) if len(params) == 0: res = "void" elif len(params) == 1: res = params[0][0] if not suppress_param_names: res = self._rearrange_asterisk(res, params[0][1]) else: for param in params: if res: res += ", " if insert_line_breaks: res += "\n" res += indent_level * "\t" typename = param[0] if not suppress_param_names: res += self._rearrange_asterisk(typename, param[1]) else: res += typename 
if insert_line_breaks: res += "\n" res += indent_level * "\t" return res def _method_call_args(self, method, insert_line_breaks = True, indent_level = 1, instance_name = "self" ): args = [p.name for p in method.parameters if p.direction != Parameter.OUT] if not method.is_static: args.insert(0, instance_name) num_args = len(args) if num_args == 0: res = "" elif num_args == 1: res = args[0] else: res = "" for arg in args: if res: res += "," if insert_line_breaks: res += "\n" res += indent_level * "\t" res += arg if insert_line_breaks: res += "\n" res += indent_level * "\t" return res def _method_signature_by_name(self, cls, method_name, suppress_param_names=False, insert_line_breaks=True, indent_level=1, instance_name="self" ): minfo = cls.get_method_info(method_name) return self._method_signature( minfo.def_origin, minfo.method, suppress_param_names, insert_line_breaks, indent_level, instance_name ) def _method_by_name(self, cls, method_name, intf=None): minfo = cls.get_method_info(method_name, intf) return minfo.method def _method_def_class(self, cls, method_name, intf=None): minfo = cls.get_method_info(method_name, intf) if minfo: return minfo.def_origin else: raise Exception("No class found for method '%s'" % method_name) def _method_def_class_cast(self, cls, method_name, intf=None): minfo = cls.get_method_info(method_name, intf) defcls = minfo.def_origin class_name = self._name_creator.replace_camel_case(defcls.name, "_").upper() module_prefix = "" module = defcls.module while module and module.name: if module_prefix: module_prefix = "_" + module_prefix module_prefix = module.name.upper() + module_prefix module = module.module res = class_name + "_CLASS" if module_prefix: res = module_prefix + "_" + res return res def _signal_technical_name(self, signal): return signal.name.replace("-", "_") def _signal_section_defhandler(self, signal): return "default_handler_" + self._signal_technical_name(signal) def _rearrange_asterisk(self, typename, parname=None): match = 
self._regex_type_w_ptrs.match(typename) if match: if parname: typename = match.group(1) parname = match.group(3) + parname else: typename = match.group(1) + " " + match.group(3) if parname: return typename + " " + parname else: return typename def _property_flags(self, prop): flags = "" for access_mode in prop.access: if flags: flags += "|" flags += { PropAccess.READ: "G_PARAM_READABLE", PropAccess.WRITE: "G_PARAM_WRITABLE", PropAccess.INIT: "G_PARAM_CONSTRUCT", PropAccess.INIT_ONLY: "G_PARAM_CONSTRUCT_ONLY" }[access_mode] return flags def _property_value(self, val): if val.literal: return val.literal elif val.number_info: if not val.number_info.decimals: return "%d" % val.number_info.digits else: return "%d.%d" % (val.number_info.digits, val.number_info.decimals) elif val.code_info: enum_name = self._name_creator.create_full_type_name(val.code_info.enumeration) enum_name = self._name_creator.replace_camel_case(enum_name, "_").upper() return enum_name + "_" + val.code_info.code_name elif val.boolean is not None: return val.boolean and "TRUE" or "FALSE" def _property_gtype(self, gtype_value): if gtype_value.gtype_id: return gtype_value.gtype_id else: return self._name_creator.create_type_macro(gtype_value.type) def _property_setter_section(self, prop): return "setter_" + prop.name.replace("-", "_").lower() def _property_getter_section(self, prop): return "getter_" + prop.name.replace("-", "_").lower() def _property_set_section(self, prop): return "set_" + prop.name.replace("-", "_").lower() def _property_get_section(self, prop): return "get_" + prop.name.replace("-", "_").lower() def _interface_impl_funcname(self, cls, intf, method_name): method_prefix = intf.cfunc_prefix or intf.name.lower() module_predix = self._module_prefix_relative(intf.module, cls.module) if module_predix: method_prefix = module_predix + "_" + method_prefix return method_prefix + "_" + method_name def _is_property_init_required(self, obj): if obj.get_properties(): return True for intf in 
obj.interfaces: if intf.properties: return True return False def _module_prefix(self, module): res = module.cfunc_prefix or module.name.lower() curmod = module while curmod.module: curmod = curmod.module tmp = curmod.cfunc_prefix or curmod.name.lower() if tmp: res = tmp + "_" + res return res def _module_prefix_relative(self, module, root): res = "" abspath_module = self._get_abs_module_path(module) abspath_root = self._get_abs_module_path(root) len_rootpath = len(abspath_root) relpath = [] for idx, m in enumerate(abspath_module): if not relpath and idx < len_rootpath and m == abspath_root[idx]: continue relpath.append(m) for m in relpath: if res: res += "_" res += m.cfunc_prefix or m.name.lower() return res def _get_abs_module_path(self, module): res = [module] curmod = module while curmod.module: curmod = curmod.module res.insert(0, curmod) return res ```
[ { "content": "```python\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Pub...
[ { "content": "<|memory_start|>```python\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the...
```python # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """A HintManager to draw hints over links.""" import collections import functools import math import re from string import ascii_lowercase from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QEvent, Qt, QUrl, QTimer) from PyQt5.QtGui import QMouseEvent from PyQt5.QtWebKit import QWebElement from PyQt5.QtWebKitWidgets import QWebPage from qutebrowser.config import config from qutebrowser.keyinput import modeman, modeparsers from qutebrowser.browser import webelem from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners from qutebrowser.utils import usertypes, log, qtutils, message, objreg, utils ElemTuple = collections.namedtuple('ElemTuple', ['elem', 'label']) Target = usertypes.enum('Target', ['normal', 'current', 'tab', 'tab_fg', 'tab_bg', 'window', 'yank', 'yank_primary', 'run', 'fill', 'hover', 'download', 'userscript', 'spawn']) class WordHintingError(Exception): """Exception raised on errors during word hinting.""" @pyqtSlot(usertypes.KeyMode) def on_mode_entered(mode, win_id): """Stop hinting when insert mode was entered.""" if mode == usertypes.KeyMode.insert: modeman.maybe_leave(win_id, usertypes.KeyMode.hint, 'insert mode') class HintContext: 
"""Context namespace used for hinting. Attributes: frames: The QWebFrames to use. destroyed_frames: id()'s of QWebFrames which have been destroyed. (Workaround for https://github.com/The-Compiler/qutebrowser/issues/152) all_elems: A list of all (elem, label) namedtuples ever created. elems: A mapping from key strings to (elem, label) namedtuples. May contain less elements than `all_elems` due to filtering. baseurl: The URL of the current page. target: What to do with the opened links. normal/current/tab/tab_fg/tab_bg/window: Get passed to BrowserTab. yank/yank_primary: Yank to clipboard/primary selection. run: Run a command. fill: Fill commandline with link. download: Download the link. userscript: Call a custom userscript. spawn: Spawn a simple command. to_follow: The link to follow when enter is pressed. args: Custom arguments for userscript/spawn rapid: Whether to do rapid hinting. mainframe: The main QWebFrame where we started hinting in. group: The group of web elements to hint. """ def __init__(self): self.all_elems = [] self.elems = {} self.target = None self.baseurl = None self.to_follow = None self.rapid = False self.frames = [] self.destroyed_frames = [] self.args = [] self.mainframe = None self.group = None def get_args(self, urlstr): """Get the arguments, with {hint-url} replaced by the given URL.""" args = [] for arg in self.args: arg = arg.replace('{hint-url}', urlstr) args.append(arg) return args class HintManager(QObject): """Manage drawing hints over links or other elements. Class attributes: HINT_TEXTS: Text displayed for different hinting modes. Attributes: _context: The HintContext for the current invocation. _win_id: The window ID this HintManager is associated with. _tab_id: The tab ID this HintManager is associated with. _filterstr: Used to save the filter string for restoring in rapid mode. Signals: mouse_event: Mouse event to be posted in the web view. arg: A QMouseEvent start_hinting: Emitted when hinting starts, before a link is clicked. 
arg: The ClickTarget to use. stop_hinting: Emitted after a link was clicked. """ HINT_TEXTS = { Target.normal: "Follow hint", Target.current: "Follow hint in current tab", Target.tab: "Follow hint in new tab", Target.tab_fg: "Follow hint in foreground tab", Target.tab_bg: "Follow hint in background tab", Target.window: "Follow hint in new window", Target.yank: "Yank hint to clipboard", Target.yank_primary: "Yank hint to primary selection", Target.run: "Run a command on a hint", Target.fill: "Set hint in commandline", Target.hover: "Hover over a hint", Target.download: "Download hint", Target.userscript: "Call userscript via hint", Target.spawn: "Spawn command via hint", } mouse_event = pyqtSignal('QMouseEvent') start_hinting = pyqtSignal(usertypes.ClickTarget) stop_hinting = pyqtSignal() def __init__(self, win_id, tab_id, parent=None): """Constructor.""" super().__init__(parent) self._win_id = win_id self._tab_id = tab_id self._context = None self._filterstr = None self._word_hinter = WordHinter() mode_manager = objreg.get('mode-manager', scope='window', window=win_id) mode_manager.left.connect(self.on_mode_left) def _get_text(self): """Get a hint text based on the current context.""" text = self.HINT_TEXTS[self._context.target] if self._context.rapid: text += ' (rapid mode)' text += '...' 
return text def _cleanup(self): """Clean up after hinting.""" for elem in self._context.all_elems: try: elem.label.removeFromDocument() except webelem.IsNullError: pass for f in self._context.frames: log.hints.debug("Disconnecting frame {}".format(f)) if id(f) in self._context.destroyed_frames: # WORKAROUND for # https://github.com/The-Compiler/qutebrowser/issues/152 log.hints.debug("Frame has been destroyed, ignoring.") continue try: f.contentsSizeChanged.disconnect(self.on_contents_size_changed) except TypeError: # It seems we can get this here: # TypeError: disconnect() failed between # 'contentsSizeChanged' and 'on_contents_size_changed' # See # https://github.com/The-Compiler/qutebrowser/issues/263 pass log.hints.debug("Disconnected.") text = self._get_text() message_bridge = objreg.get('message-bridge', scope='window', window=self._win_id) message_bridge.maybe_reset_text(text) self._context = None def _hint_strings(self, elems): """Calculate the hint strings for elems. Inspired by Vimium. Args: elems: The elements to get hint strings for. Return: A list of hint strings, in the same order as the elements. """ hint_mode = config.get('hints', 'mode') if hint_mode == 'word': try: return self._word_hinter.hint(elems) except WordHintingError as e: message.error(self._win_id, str(e), immediately=True) # falls back on letter hints if hint_mode == 'number': chars = '0123456789' else: chars = config.get('hints', 'chars') min_chars = config.get('hints', 'min-chars') if config.get('hints', 'scatter') and hint_mode != 'number': return self._hint_scattered(min_chars, chars, elems) else: return self._hint_linear(min_chars, chars, elems) def _hint_scattered(self, min_chars, chars, elems): """Produce scattered hint labels with variable length (like Vimium). Args: min_chars: The minimum length of labels. chars: The alphabet to use for labels. elems: The elements to generate labels for. """ # Determine how many digits the link hints will require in the worst # case. 
Usually we do not need all of these digits for every link # single hint, so we can show shorter hints for a few of the links. needed = max(min_chars, math.ceil(math.log(len(elems), len(chars)))) # Short hints are the number of hints we can possibly show which are # (needed - 1) digits in length. if needed > min_chars: short_count = math.floor((len(chars) ** needed - len(elems)) / len(chars)) else: short_count = 0 long_count = len(elems) - short_count strings = [] if needed > 1: for i in range(short_count): strings.append(self._number_to_hint_str(i, chars, needed - 1)) start = short_count * len(chars) for i in range(start, start + long_count): strings.append(self._number_to_hint_str(i, chars, needed)) return self._shuffle_hints(strings, len(chars)) def _hint_linear(self, min_chars, chars, elems): """Produce linear hint labels with constant length (like dwb). Args: min_chars: The minimum length of labels. chars: The alphabet to use for labels. elems: The elements to generate labels for. """ strings = [] needed = max(min_chars, math.ceil(math.log(len(elems), len(chars)))) for i in range(len(elems)): strings.append(self._number_to_hint_str(i, chars, needed)) return strings def _shuffle_hints(self, hints, length): """Shuffle the given set of hints so that they're scattered. Hints starting with the same character will be spread evenly throughout the array. Inspired by Vimium. Args: hints: A list of hint strings. length: Length of the available charset. Return: A list of shuffled hint strings. """ buckets = [[] for i in range(length)] for i, hint in enumerate(hints): buckets[i % len(buckets)].append(hint) result = [] for bucket in buckets: result += bucket return result def _number_to_hint_str(self, number, chars, digits=0): """Convert a number like "8" into a hint string like "JK". This is used to sequentially generate all of the hint text. The hint string will be "padded with zeroes" to ensure its length is >= digits. Inspired by Vimium. Args: number: The hint number. 
chars: The charset to use. digits: The minimum output length. Return: A hint string. """ base = len(chars) hintstr = [] remainder = 0 while True: remainder = number % base hintstr.insert(0, chars[remainder]) number -= remainder number //= base if number <= 0: break # Pad the hint string we're returning so that it matches digits. for _ in range(0, digits - len(hintstr)): hintstr.insert(0, chars[0]) return ''.join(hintstr) def _is_hidden(self, elem): """Check if the element is hidden via display=none.""" display = elem.styleProperty('display', QWebElement.InlineStyle) return display == 'none' def _show_elem(self, elem): """Show a given element.""" elem.setStyleProperty('display', 'inline !important') def _hide_elem(self, elem): """Hide a given element.""" elem.setStyleProperty('display', 'none !important') def _set_style_properties(self, elem, label): """Set the hint CSS on the element given. Args: elem: The QWebElement to set the style attributes for. label: The label QWebElement. """ attrs = [ ('display', 'inline !important'), ('z-index', '{} !important'.format(int(2 ** 32 / 2 - 1))), ('pointer-events', 'none !important'), ('position', 'fixed !important'), ('color', config.get('colors', 'hints.fg') + ' !important'), ('background', config.get('colors', 'hints.bg') + ' !important'), ('font', config.get('fonts', 'hints') + ' !important'), ('border', config.get('hints', 'border') + ' !important'), ('opacity', str(config.get('hints', 'opacity')) + ' !important'), ] # Make text uppercase if set in config if (config.get('hints', 'uppercase') and config.get('hints', 'mode') == 'letter'): attrs.append(('text-transform', 'uppercase !important')) else: attrs.append(('text-transform', 'none !important')) for k, v in attrs: label.setStyleProperty(k, v) self._set_style_position(elem, label) def _set_style_position(self, elem, label): """Set the CSS position of the label element. Args: elem: The QWebElement to set the style attributes for. label: The label QWebElement. 
""" rect = elem.rect_on_view(adjust_zoom=False) left = rect.x() top = rect.y() log.hints.vdebug("Drawing label '{!r}' at {}/{} for element '{!r}'" .format(label, left, top, elem)) label.setStyleProperty('left', '{}px !important'.format(left)) label.setStyleProperty('top', '{}px !important'.format(top)) def _draw_label(self, elem, string): """Draw a hint label over an element. Args: elem: The QWebElement to use. string: The hint string to print. Return: The newly created label element """ doc = elem.webFrame().documentElement() # It seems impossible to create an empty QWebElement for which isNull() # is false so we can work with it. # As a workaround, we use appendInside() with markup as argument, and # then use lastChild() to get a reference to it. # See: http://stackoverflow.com/q/7364852/2085149 body = doc.findFirst('body') if not body.isNull(): parent = body else: parent = doc parent.appendInside('<span></span>') label = webelem.WebElementWrapper(parent.lastChild()) label['class'] = 'qutehint' self._set_style_properties(elem, label) label.setPlainText(string) return label def _show_url_error(self): """Show an error because no link was found.""" message.error(self._win_id, "No suitable link found for this element.", immediately=True) def _click(self, elem, context): """Click an element. Args: elem: The QWebElement to click. context: The HintContext to use. 
""" target_mapping = { Target.normal: usertypes.ClickTarget.normal, Target.current: usertypes.ClickTarget.normal, Target.tab_fg: usertypes.ClickTarget.tab, Target.tab_bg: usertypes.ClickTarget.tab_bg, Target.window: usertypes.ClickTarget.window, Target.hover: usertypes.ClickTarget.normal, } if config.get('tabs', 'background-tabs'): target_mapping[Target.tab] = usertypes.ClickTarget.tab_bg else: target_mapping[Target.tab] = usertypes.ClickTarget.tab # Click the center of the largest square fitting into the top/left # corner of the rectangle, this will help if part of the <a> element # is hidden behind other elements # https://github.com/The-Compiler/qutebrowser/issues/1005 rect = elem.rect_on_view() if rect.width() > rect.height(): rect.setWidth(rect.height()) else: rect.setHeight(rect.width()) pos = rect.center() action = "Hovering" if context.target == Target.hover else "Clicking" log.hints.debug("{} on '{}' at position {}".format( action, elem.debug_text(), pos)) self.start_hinting.emit(target_mapping[context.target]) if context.target in [Target.tab, Target.tab_fg, Target.tab_bg, Target.window]: modifiers = Qt.ControlModifier else: modifiers = Qt.NoModifier events = [ QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton, Qt.NoModifier), ] if context.target != Target.hover: events += [ QMouseEvent(QEvent.MouseButtonPress, pos, Qt.LeftButton, Qt.LeftButton, modifiers), QMouseEvent(QEvent.MouseButtonRelease, pos, Qt.LeftButton, Qt.NoButton, modifiers), ] if context.target in [Target.normal, Target.current]: # Set the pre-jump mark ', so we can jump back here after following tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tabbed_browser.set_mark("'") if context.target == Target.current: elem.remove_blank_target() for evt in events: self.mouse_event.emit(evt) if elem.is_text_input() and elem.is_editable(): QTimer.singleShot(0, functools.partial( elem.webFrame().page().triggerAction, QWebPage.MoveToEndOfDocument)) 
QTimer.singleShot(0, self.stop_hinting.emit) def _yank(self, url, context): """Yank an element to the clipboard or primary selection. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ sel = (context.target == Target.yank_primary and utils.supports_selection()) urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword) utils.set_clipboard(urlstr, selection=sel) msg = "Yanked URL to {}: {}".format( "primary selection" if sel else "clipboard", urlstr) message.info(self._win_id, msg) def _run_cmd(self, url, context): """Run the command based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toString(QUrl.FullyEncoded) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely(' '.join(args)) def _preset_cmd_text(self, url, context): """Preset a commandline text based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toDisplayString(QUrl.FullyEncoded) args = context.get_args(urlstr) text = ' '.join(args) if text[0] not in modeparsers.STARTCHARS: message.error(self._win_id, "Invalid command text '{}'.".format(text), immediately=True) else: message.set_cmd_text(self._win_id, text) def _download(self, elem, context): """Download a hint URL. Args: elem: The QWebElement to download. _context: The HintContext to use. """ url = self._resolve_url(elem, context.baseurl) if url is None: self._show_url_error() return if context.rapid: prompt = False else: prompt = None download_manager = objreg.get('download-manager', scope='window', window=self._win_id) download_manager.get(url, page=elem.webFrame().page(), prompt_download_directory=prompt) def _call_userscript(self, elem, context): """Call a userscript from a hint. Args: elem: The QWebElement to use in the userscript. context: The HintContext to use. 
""" cmd = context.args[0] args = context.args[1:] frame = context.mainframe env = { 'QUTE_MODE': 'hints', 'QUTE_SELECTED_TEXT': str(elem), 'QUTE_SELECTED_HTML': elem.toOuterXml(), } url = self._resolve_url(elem, context.baseurl) if url is not None: env['QUTE_URL'] = url.toString(QUrl.FullyEncoded) env.update(userscripts.store_source(frame)) userscripts.run(cmd, *args, win_id=self._win_id, env=env) def _spawn(self, url, context): """Spawn a simple command from a hint. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely('spawn ' + ' '.join(args)) def _resolve_url(self, elem, baseurl): """Resolve a URL and check if we want to keep it. Args: elem: The QWebElement to get the URL of. baseurl: The baseurl of the current tab. Return: A QUrl with the absolute URL, or None. """ for attr in ('href', 'src'): if attr in elem: text = elem[attr].strip() break else: return None url = QUrl(text) if not url.isValid(): return None if url.isRelative(): url = baseurl.resolved(url) qtutils.ensure_valid(url) return url def _find_prevnext(self, frame, prev=False): """Find a prev/next element in frame.""" # First check for <link rel="prev(ious)|next"> elems = frame.findAllElements(webelem.SELECTORS[webelem.Group.links]) rel_values = ('prev', 'previous') if prev else ('next') for e in elems: e = webelem.WebElementWrapper(e) try: rel_attr = e['rel'] except KeyError: continue if rel_attr in rel_values: log.hints.debug("Found '{}' with rel={}".format( e.debug_text(), rel_attr)) return e # Then check for regular links/buttons. 
elems = frame.findAllElements( webelem.SELECTORS[webelem.Group.prevnext]) elems = [webelem.WebElementWrapper(e) for e in elems] filterfunc = webelem.FILTERS[webelem.Group.prevnext] elems = [e for e in elems if filterfunc(e)] option = 'prev-regexes' if prev else 'next-regexes' if not elems: return None for regex in config.get('hints', option): log.hints.vdebug("== Checking regex '{}'.".format(regex.pattern)) for e in elems: text = str(e) if not text: continue if regex.search(text): log.hints.debug("Regex '{}' matched on '{}'.".format( regex.pattern, text)) return e else: log.hints.vdebug("No match on '{}'!".format(text)) return None def _connect_frame_signals(self): """Connect the contentsSizeChanged signals to all frames.""" for f in self._context.frames: log.hints.debug("Connecting frame {}".format(f)) f.contentsSizeChanged.connect(self.on_contents_size_changed) def _check_args(self, target, *args): """Check the arguments passed to start() and raise if they're wrong. Args: target: A Target enum member. args: Arguments for userscript/download """ if not isinstance(target, Target): raise TypeError("Target {} is no Target member!".format(target)) if target in (Target.userscript, Target.spawn, Target.run, Target.fill): if not args: raise cmdexc.CommandError( "'args' is required with target userscript/spawn/run/" "fill.") else: if args: raise cmdexc.CommandError( "'args' is only allowed with target userscript/spawn.") def _init_elements(self): """Initialize the elements and labels based on the context set.""" elems = [] for f in self._context.frames: elems += f.findAllElements(webelem.SELECTORS[self._context.group]) elems = [e for e in elems if webelem.is_visible(e, self._context.mainframe)] # We wrap the elements late for performance reasons, as wrapping 1000s # of elements (with ~50 methods each) just takes too much time... 
elems = [webelem.WebElementWrapper(e) for e in elems] filterfunc = webelem.FILTERS.get(self._context.group, lambda e: True) elems = [e for e in elems if filterfunc(e)] if not elems: raise cmdexc.CommandError("No elements found.") strings = self._hint_strings(elems) log.hints.debug("hints: {}".format(', '.join(strings))) for e, string in zip(elems, strings): label = self._draw_label(e, string) elem = ElemTuple(e, label) self._context.all_elems.append(elem) self._context.elems[string] = elem keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings) def _filter_matches(self, filterstr, elemstr): """Return True if `filterstr` matches `elemstr`.""" # Empty string and None always match if not filterstr: return True filterstr = filterstr.casefold() elemstr = elemstr.casefold() # Do multi-word matching return all(word in elemstr for word in filterstr.split()) def follow_prevnext(self, frame, baseurl, prev=False, tab=False, background=False, window=False): """Click a "previous"/"next" element on the page. Args: frame: The frame where the element is in. baseurl: The base URL of the current tab. prev: True to open a "previous" link, False to open a "next" link. tab: True to open in a new tab, False for the current tab. background: True to open in a background tab. window: True to open in a new window, False for the current one. 
""" from qutebrowser.mainwindow import mainwindow elem = self._find_prevnext(frame, prev) if elem is None: raise cmdexc.CommandError("No {} links found!".format( "prev" if prev else "forward")) url = self._resolve_url(elem, baseurl) if url is None: raise cmdexc.CommandError("No {} links found!".format( "prev" if prev else "forward")) qtutils.ensure_valid(url) if window: new_window = mainwindow.MainWindow() new_window.show() tabbed_browser = objreg.get('tabbed-browser', scope='window', window=new_window.win_id) tabbed_browser.tabopen(url, background=False) elif tab: tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tabbed_browser.tabopen(url, background=background) else: webview = objreg.get('webview', scope='tab', window=self._win_id, tab=self._tab_id) webview.openurl(url) @cmdutils.register(instance='hintmanager', scope='tab', name='hint', star_args_optional=True, maxsplit=2) @cmdutils.argument('win_id', win_id=True) def start(self, rapid=False, group=webelem.Group.all, target=Target.normal, *args, win_id): """Start hinting. Args: rapid: Whether to do rapid hinting. This is only possible with targets `tab` (with background-tabs=true), `tab-bg`, `window`, `run`, `hover`, `userscript` and `spawn`. group: The hinting mode to use. - `all`: All clickable elements. - `links`: Only links. - `images`: Only images. target: What to do with the selected element. - `normal`: Open the link. - `current`: Open the link in the current tab. - `tab`: Open the link in a new tab (honoring the background-tabs setting). - `tab-fg`: Open the link in a new foreground tab. - `tab-bg`: Open the link in a new background tab. - `window`: Open the link in a new window. - `hover` : Hover over the link. - `yank`: Yank the link to the clipboard. - `yank-primary`: Yank the link to the primary selection. - `run`: Run the argument as command. - `fill`: Fill the commandline with the command given as argument. - `download`: Download the link. 
- `userscript`: Call a userscript with `$QUTE_URL` set to the link. - `spawn`: Spawn a command. *args: Arguments for spawn/userscript/run/fill. - With `spawn`: The executable and arguments to spawn. `{hint-url}` will get replaced by the selected URL. - With `userscript`: The userscript to execute. Either store the userscript in `~/.local/share/qutebrowser/userscripts` (or `$XDG_DATA_DIR`), or use an absolute path. - With `fill`: The command to fill the statusbar with. `{hint-url}` will get replaced by the selected URL. - With `run`: Same as `fill`. """ tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) widget = tabbed_browser.currentWidget() if widget is None: raise cmdexc.CommandError("No WebView available yet!") mainframe = widget.page().mainFrame() if mainframe is None: raise cmdexc.CommandError("No frame focused!") mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) if mode_manager.mode == usertypes.KeyMode.hint: modeman.leave(win_id, usertypes.KeyMode.hint, 're-hinting') if rapid: if target in [Target.tab_bg, Target.window, Target.run, Target.hover, Target.userscript, Target.spawn, Target.download, Target.normal, Target.current]: pass elif (target == Target.tab and config.get('tabs', 'background-tabs')): pass else: name = target.name.replace('_', '-') raise cmdexc.CommandError("Rapid hinting makes no sense with " "target {}!".format(name)) self._check_args(target, *args) self._context = HintContext() self._context.target = target self._context.rapid = rapid try: self._context.baseurl = tabbed_browser.current_url() except qtutils.QtValueError: raise cmdexc.CommandError("No URL set for this page yet!") self._context.frames = webelem.get_child_frames(mainframe) for frame in self._context.frames: # WORKAROUND for # https://github.com/The-Compiler/qutebrowser/issues/152 frame.destroyed.connect(functools.partial( self._context.destroyed_frames.append, id(frame))) self._context.args = args 
self._context.mainframe = mainframe self._context.group = group self._init_elements() message_bridge = objreg.get('message-bridge', scope='window', window=self._win_id) message_bridge.set_text(self._get_text()) self._connect_frame_signals() modeman.enter(self._win_id, usertypes.KeyMode.hint, 'HintManager.start') def handle_partial_key(self, keystr): """Handle a new partial keypress.""" log.hints.debug("Handling new keystring: '{}'".format(keystr)) for string, elem in self._context.elems.items(): try: if string.startswith(keystr): matched = string[:len(keystr)] rest = string[len(keystr):] match_color = config.get('colors', 'hints.fg.match') elem.label.setInnerXml( '<font color="{}">{}</font>{}'.format( match_color, matched, rest)) if self._is_hidden(elem.label): # hidden element which matches again -> show it self._show_elem(elem.label) else: # element doesn't match anymore -> hide it self._hide_elem(elem.label) except webelem.IsNullError: pass def _filter_number_hints(self): """Apply filters for numbered hints and renumber them. Return: Elements which are still visible """ # renumber filtered hints elems = [] for e in self._context.all_elems: try: if not self._is_hidden(e.label): elems.append(e) except webelem.IsNullError: pass if not elems: # Whoops, filtered all hints modeman.leave(self._win_id, usertypes.KeyMode.hint, 'all filtered') return {} strings = self._hint_strings(elems) self._context.elems = {} for elem, string in zip(elems, strings): elem.label.setInnerXml(string) self._context.elems[string] = elem keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings, preserve_filter=True) return self._context.elems def _filter_non_number_hints(self): """Apply filters for letter/word hints. 
Return: Elements which are still visible """ visible = {} for string, elem in self._context.elems.items(): try: if not self._is_hidden(elem.label): visible[string] = elem except webelem.IsNullError: pass if not visible: # Whoops, filtered all hints modeman.leave(self._win_id, usertypes.KeyMode.hint, 'all filtered') return visible def filter_hints(self, filterstr): """Filter displayed hints according to a text. Args: filterstr: The string to filter with, or None to use the filter from previous call (saved in `self._filterstr`). If `filterstr` is an empty string or if both `filterstr` and `self._filterstr` are None, all hints are shown. """ if filterstr is None: filterstr = self._filterstr else: self._filterstr = filterstr for elem in self._context.all_elems: try: if self._filter_matches(filterstr, str(elem.elem)): if self._is_hidden(elem.label): # hidden element which matches again -> show it self._show_elem(elem.label) else: # element doesn't match anymore -> hide it self._hide_elem(elem.label) except webelem.IsNullError: pass if config.get('hints', 'mode') == 'number': visible = self._filter_number_hints() else: visible = self._filter_non_number_hints() if (len(visible) == 1 and config.get('hints', 'auto-follow') and filterstr is not None): # apply auto-follow-timeout timeout = config.get('hints', 'auto-follow-timeout') keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) normal_parser = keyparsers[usertypes.KeyMode.normal] normal_parser.set_inhibited_timeout(timeout) # unpacking gets us the first (and only) key in the dict. self.fire(*visible) def fire(self, keystr, force=False): """Fire a completed hint. Args: keystr: The keychain string to follow. force: When True, follow even when auto-follow is false. 
""" if not (force or config.get('hints', 'auto-follow')): self.handle_partial_key(keystr) self._context.to_follow = keystr return # Handlers which take a QWebElement elem_handlers = { Target.normal: self._click, Target.current: self._click, Target.tab: self._click, Target.tab_fg: self._click, Target.tab_bg: self._click, Target.window: self._click, Target.hover: self._click, # _download needs a QWebElement to get the frame. Target.download: self._download, Target.userscript: self._call_userscript, } # Handlers which take a QUrl url_handlers = { Target.yank: self._yank, Target.yank_primary: self._yank, Target.run: self._run_cmd, Target.fill: self._preset_cmd_text, Target.spawn: self._spawn, } elem = self._context.elems[keystr].elem if elem.webFrame() is None: message.error(self._win_id, "This element has no webframe.", immediately=True) return if self._context.target in elem_handlers: handler = functools.partial(elem_handlers[self._context.target], elem, self._context) elif self._context.target in url_handlers: url = self._resolve_url(elem, self._context.baseurl) if url is None: self._show_url_error() return handler = functools.partial(url_handlers[self._context.target], url, self._context) else: raise ValueError("No suitable handler found!") if not self._context.rapid: modeman.maybe_leave(self._win_id, usertypes.KeyMode.hint, 'followed') else: # Reset filtering self.filter_hints(None) # Undo keystring highlighting for string, elem in self._context.elems.items(): elem.label.setInnerXml(string) handler() @cmdutils.register(instance='hintmanager', scope='tab', hide=True, modes=[usertypes.KeyMode.hint]) def follow_hint(self, keystring=None): """Follow a hint. Args: keystring: The hint to follow, or None. 
""" if keystring is None: if self._context.to_follow is None: raise cmdexc.CommandError("No hint to follow") else: keystring = self._context.to_follow elif keystring not in self._context.elems: raise cmdexc.CommandError("No hint {}!".format(keystring)) self.fire(keystring, force=True) @pyqtSlot('QSize') def on_contents_size_changed(self, _size): """Reposition hints if contents size changed.""" log.hints.debug("Contents size changed...!") for e in self._context.all_elems: try: if e.elem.webFrame() is None: # This sometimes happens for some reason... e.label.removeFromDocument() continue self._set_style_position(e.elem, e.label) except webelem.IsNullError: pass @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Stop hinting when hinting mode was left.""" if mode != usertypes.KeyMode.hint or self._context is None: # We have one HintManager per tab, so when this gets called, # self._context might be None, because the current tab is not # hinting. return self._cleanup() class WordHinter: """Generator for word hints. Attributes: words: A set of words to be used when no "smart hint" can be derived from the hinted element. """ def __init__(self): # will be initialized on first use. 
self.words = set() self.dictionary = None def ensure_initialized(self): """Generate the used words if yet uninitialized.""" dictionary = config.get("hints", "dictionary") if not self.words or self.dictionary != dictionary: self.words.clear() self.dictionary = dictionary try: with open(dictionary, encoding="UTF-8") as wordfile: alphabet = set(ascii_lowercase) hints = set() lines = (line.rstrip().lower() for line in wordfile) for word in lines: if set(word) - alphabet: # contains none-alphabetic chars continue if len(word) > 4: # we don't need words longer than 4 continue for i in range(len(word)): # remove all prefixes of this word hints.discard(word[:i + 1]) hints.add(word) self.words.update(hints) except IOError as e: error = "Word hints requires reading the file at {}: {}" raise WordHintingError(error.format(dictionary, str(e))) def extract_tag_words(self, elem): """Extract tag words form the given element.""" attr_extractors = { "alt": lambda elem: elem["alt"], "name": lambda elem: elem["name"], "title": lambda elem: elem["title"], "src": lambda elem: elem["src"].split('/')[-1], "href": lambda elem: elem["href"].split('/')[-1], "text": str, } extractable_attrs = collections.defaultdict(list, { "IMG": ["alt", "title", "src"], "A": ["title", "href", "text"], "INPUT": ["name"] }) return (attr_extractors[attr](elem) for attr in extractable_attrs[elem.tagName()] if attr in elem or attr == "text") def tag_words_to_hints(self, words): """Take words and transform them to proper hints if possible.""" for candidate in words: if not candidate: continue match = re.search('[A-Za-z]{3,}', candidate) if not match: continue if 4 < match.end() - match.start() < 8: yield candidate[match.start():match.end()].lower() def any_prefix(self, hint, existing): return any(hint.startswith(e) or e.startswith(hint) for e in existing) def filter_prefixes(self, hints, existing): return (h for h in hints if not self.any_prefix(h, existing)) def new_hint_for(self, elem, existing, fallback): 
"""Return a hint for elem, not conflicting with the existing.""" new = self.tag_words_to_hints(self.extract_tag_words(elem)) new_no_prefixes = self.filter_prefixes(new, existing) fallback_no_prefixes = self.filter_prefixes(fallback, existing) # either the first good, or None return (next(new_no_prefixes, None) or next(fallback_no_prefixes, None)) def hint(self, elems): """Produce hint labels based on the html tags. Produce hint words based on the link text and random words from the words arg as fallback. Args: words: Words to use as fallback when no link text can be used. elems: The elements to get hint strings for. Return: A list of hint strings, in the same order as the elements. """ self.ensure_initialized() hints = [] used_hints = set() words = iter(self.words) for elem in elems: hint = self.new_hint_for(elem, used_hints, words) if not hint: raise WordHintingError("Not enough words in the dictionary.") used_hints.add(hint) hints.append(hint) return hints ```
[ { "content": "Here is the code block:\n```python\nfrom __future__ import print_function\nfrom icalendar import *\nfrom datetime import date, datetime, timedelta\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport pickle\nimport csv\nimport pandas\nfrom pandas.io import sql\nimport matplotlib....
[ { "content": "Here is the code block:\n<|memory_start|>```python\nfrom __future__ import print_function\nfrom icalendar import *\nfrom datetime import date, datetime, timedelta\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport pickle\nimport csv\nimport pandas\nfrom pandas.io import sql\nim...
```python from __future__ import print_function from icalendar import * from datetime import date, datetime, timedelta import mysql.connector from mysql.connector import errorcode import pickle import csv import pandas from pandas.io import sql import matplotlib.pyplot as plt import xlsxwriter import numpy as np import os import re import glob import pytz from StringIO import StringIO #from zipfile import ZipFile from urllib import urlopen import calendar_parser as cp # for calendar_parser, I downloaded the Python file created for this package # https://github.com/oblique63/Python-GoogleCalendarParser/blob/master/calendar_parser.py # and saved it in the working directory with my Python file (Jupyter Notebook file). # In calendar_parser.py, their function _fix_timezone is very crucial for my code to # display the correct local time. USER = # enter database username PASS = # enter database password HOST = # enter hostname, e.g. '127.0.0.1' cnx = mysql.connector.connect(user=USER, password=PASS, host=HOST) cursor = cnx.cursor() # Approach / Code modified from MySQL Connector web page DB_NAME = "CalDb" # 1) Creates database if it doesn't already exist # 2) Then connects to the database def create_database(cursor): try: cursor.execute( "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME)) except mysql.connector.Error as err: print("Failed creating database: {}".format(err)) exit(1) try: cnx.database = DB_NAME except mysql.connector.Error as err: if err.errno == errorcode.ER_BAD_DB_ERROR: create_database(cursor) cnx.database = DB_NAME else: print(err) exit(1) # Create table specifications TABLES = {} TABLES['eBike'] = ( "CREATE TABLE IF NOT EXISTS `eBike` (" " `eBikeName` varchar(10)," " `Organizer` varchar(100)," " `Created` datetime NOT NULL," " `Start` datetime NOT NULL," " `End` datetime NOT NULL" ") ENGINE=InnoDB") # If table does not already exist, this code will create it based on specifications for name, ddl in TABLES.iteritems(): try: 
print("Creating table {}: ".format(name), end='') cursor.execute(ddl) except mysql.connector.Error as err: if err.errno == errorcode.ER_TABLE_EXISTS_ERROR: print("already exists.") else: print(err.msg) else: print("OK") # Obtain current count from each calendar to read in and add additional entries only cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Gold'") GoldExistingCount = cursor.fetchall() cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Blue'") BlueExistingCount = cursor.fetchall() # Declare lists eBikeName = [] Organizer = [] DTcreated = [] DTstart = [] DTend = [] Counter = 0 Cal1URL = # Google Calendar URL (from Calendar Settings -> Private Address) Cal2URL = # URL of second Google Calendar...can scale this code to as many calendars as desired # at an extremily large number (e.g. entire company level), could modify and use parallel processing (e.g. PySpark) Blue = Cal1URL Gold = Cal2URL URL_list = [Blue, Gold] for i in URL_list: Counter = 0 b = urlopen(i) cal = Calendar.from_ical(b.read()) timezones = cal.walk('VTIMEZONE') if (i == Blue): BlueLen = len(cal.walk()) elif (i == Gold): GoldLen = len(cal.walk()) #print (cal) #print ("Stuff") #print (cal.subcomponents) for k in cal.walk(): if k.name == "VEVENT": Counter += 1 if (i == Blue): if BlueLen - Counter > GoldExistingCount[0][0]: eBikeName.append('Blue') Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) ) DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), pytz.timezone(timezones[0]['TZID']) ) ) DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) ) DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) ) #print (k.property_items('ATTENDEE')) elif (i == Gold): if GoldLen - Counter > BlueExistingCount[0][0]: eBikeName.append('Gold') Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) ) DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), 
pytz.timezone(timezones[0]['TZID']) ) ) DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) ) DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) ) b.close() # Now that calendar data is fully read in, create a list with data in a format for # entering into the MySQL database. # # At this point, if the MySQL Connector component is not desired, other approaches # include creating a Pandas dataframe or something else. # For reference, a Pandas dataframe could be created with the following command: # df = pandas.DataFrame({'ORGANIZER' : Organizer,'CREATED' : DTcreated, 'DTSTART' : DTstart,'DTEND': DTend}) eBikeData = [] ##################################################### for i in range(len(DTcreated)): # Add in condition that the organizer email address cannot be 'none' or any other P&T Management email if (Organizer[i] != 'None' and Organizer[i] != 'lauren.bennett@berkeley.edu' and Organizer[i] != 'dmeroux@berkeley.edu' and Organizer[i] != 'berkeley.edu_534da9tjgdsahifulshf42lfbo@group.calendar.google.com'): eBikeData.append((eBikeName[i], Organizer[i], DTcreated[i], DTstart[i], DTend[i])) # Insert calendar data into MySQL table eBike cursor.executemany("INSERT INTO eBike (eBikeName, Organizer, Created, Start, End) VALUES (%s, %s, %s, %s, %s)", eBikeData) cnx.commit() # Find emails associated with reservations created at latest 6 days ago cursor.execute("SELECT DISTINCT Organizer FROM eBike WHERE DATEDIFF(CURDATE(), Start) <= 6 AND DATEDIFF(CURDATE(), Start) >= 0") WeeklyEmail = cursor.fetchall() Email = [] for i in range(len(WeeklyEmail)): Email.append(WeeklyEmail[i][0]) if(Email[i] != 'None'): print(Email[i]) # https://xlsxwriter.readthedocs.org # Workbook Document Name workbook = xlsxwriter.Workbook('E-BikeUpdate' + datetime.strftime(datetime.now(), "%Y-%m-%d") + '.xlsx') # Define 'bold' format bold = workbook.add_format({'bold': True}) format1 = workbook.add_format({'bold': 1, 
'bg_color': '#3CDAE5', 'font_color': '#092A51'}) format2 = workbook.add_format({'bold': 1, 'bg_color': '#DA7BD0', 'font_color': '#A50202'}) # Add Intro Sheet worksheet = workbook.add_worksheet('INTRO') worksheet.write('A1', 'Sheet', bold) worksheet.write('A2', 'Ebike_Rides_by_User') worksheet.write('A3', 'Trips_by_Res_Time') worksheet.write('A4', 'Trips_by_Weekday') worksheet.write('A5', 'Utilization') worksheet.write('A6', 'Aggregate_Advance_Reservation') worksheet.write('A7', 'Time_Series_Advance_Reservation') worksheet.write('B1', 'Description', bold) worksheet.write('B2', 'Total E-Bike Rides by User Email') worksheet.write('B3', 'Total E-Bike Rides by Reservation Hour') worksheet.write('B4', 'Total E-Bike Rides by Weekday') worksheet.write('B5', 'Average and Maximum Percent and Hours Utilization') worksheet.write('B6', 'Number of Days E-Bikes Were Reserved in Advance, by Count of Reservations') worksheet.write('B7', 'Number of Days E-Bikes Were Reserved in Advance, by Reservation Start Datetime') ### Total e-Bike Rides by User cursor.execute("SELECT Organizer, COUNT(*) AS Total_Rides FROM eBike GROUP BY Organizer ORDER BY Total_Rides DESC;") TotalRides_by_User = cursor.fetchall() # Worksheet Name worksheet1 = workbook.add_worksheet('Ebike_Rides_by_User') # Column Names worksheet1.write('A1', 'User', bold) worksheet1.write('B1', 'Total Rides', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for UserEmail, UserRideCount in (TotalRides_by_User): worksheet1.write(row, col, UserEmail) worksheet1.write(row, col + 1, UserRideCount) row += 1 # Conditional Formatting: E-bike Users with 20+ Rides worksheet1.conditional_format('B1:B9999', {'type': 'cell', 'criteria': '>=', 'value': 20, 'format': format1}) ### Total Trips by Reservation Time cursor.execute("SELECT EXTRACT(HOUR FROM Start) AS Hour_24, DATE_FORMAT(Start, '%h %p') AS Reservation_Time, COUNT(*) AS Total_Rides FROM eBike GROUP BY Reservation_Time, 
Hour_24 ORDER BY Hour_24 ASC") Trips_by_Time = cursor.fetchall() # Worksheet Name worksheet2 = workbook.add_worksheet('Trips_by_Res_Time') # Data. # Column Names worksheet2.write('A1', 'Reservation Start Time', bold) worksheet2.write('B1', 'Total Rides', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Hour_24, Reservation_Time, Total_Rides in (Trips_by_Time): worksheet2.write(row, col, Reservation_Time) worksheet2.write(row, col + 1, Total_Rides) row += 1 # Add Chart chart = workbook.add_chart({'type': 'line'}) # Add Data to Chart chart.add_series({ 'categories': '=Trips_by_Res_Time!$A$2:$A$16', 'values': '=Trips_by_Res_Time!$B$2:$B$16', 'fill': {'color': '#791484'}, 'border': {'color': '#52B7CB'} }) # Format Chart chart.set_title({ 'name': 'Total Rides by Reservation Start Time', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB', }, }) chart.set_x_axis({ 'name': 'Reservation Start Time', 'empty_cells': 'gaps', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'name': 'Arial', 'color': '#52B7CB', }, }) chart.set_y_axis({ 'name': 'Total Rides', 'empty_cells': 'gaps', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'italic': True, 'color': '#52B7CB', }, }) # Remove Legend chart.set_legend({'position': 'none'}) # Insert Chart worksheet2.insert_chart('E1', chart) # GO TO END OF DATA ### Total Trips by Weekday cursor.execute("SELECT DAYNAME(Start) AS Weekday, COUNT(*) AS Total_Rides FROM eBike GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')") Trips_by_Weekday = cursor.fetchall() # Worksheet Name worksheet3 = workbook.add_worksheet('Trips_by_Weekday') # Column Names worksheet3.write('A1', 'Weekday', bold) worksheet3.write('B1', 'Total Rides', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Weekday, Total_Rides_by_Weekday in 
(Trips_by_Weekday): worksheet3.write(row, col, Weekday) worksheet3.write(row, col + 1, Total_Rides_by_Weekday) row += 1 # Add Chart chart = workbook.add_chart({'type': 'line'}) # Add Data to Chart chart.add_series({ 'categories': '=Trips_by_Weekday!$A$2:$A$8)', 'values': '=Trips_by_Weekday!$B$2:$B$8)', 'fill': {'color': '#791484'}, 'border': {'color': '#52B7CB'} }) # Format Chart chart.set_title({ 'name': 'Total Rides by Weekday', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB', }, }) chart.set_x_axis({ 'name': 'Weekday', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'name': 'Arial', 'color': '#52B7CB', }, }) chart.set_y_axis({ 'name': 'Total Rides', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'italic': True, 'color': '#52B7CB', }, }) # Remove Legend chart.set_legend({'position': 'none'}) # Insert Chart worksheet3.insert_chart('E1', chart) ### Average and Maximum Hours and Percent Utilization by Weekday cursor.execute("SELECT DAYNAME(Start) AS Weekday, MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Max_Hours, (MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Max_PCT_Utilization, AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Avg_Hours, (AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Avg_PCT_Utilization FROM eBike WHERE (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 < 95 GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')") Avg_Max_Hours_PCTutilization_by_Weekday = cursor.fetchall() # Worksheet Name worksheet4 = workbook.add_worksheet('Utilization') # Column Names worksheet4.write('A1', 'Weekday', bold) worksheet4.write('B1', 'Maximum Reservation Duration (hrs)', bold) worksheet4.write('C1', 'Maximum Percentage Utilization', bold) worksheet4.write('D1', 'Average Reservation Duration (hrs)', bold) worksheet4.write('E1', 'Average Percent Utilization', bold) worksheet4.write('F1', 'NOTE: A 
small handfull of outliers above 95% utilization are excluded', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Weekday_AMH, Max_Hours, Max_PCT_Utilization, Avg_Hours, Avg_PCT_Utilization in (Avg_Max_Hours_PCTutilization_by_Weekday): worksheet4.write(row, col, Weekday_AMH) worksheet4.write(row, col + 1, Max_Hours) worksheet4.write(row, col + 2, Max_PCT_Utilization) worksheet4.write(row, col + 3, Avg_Hours) worksheet4.write(row, col + 4, Avg_PCT_Utilization) row += 1 # Conditional Formatting: Percent Utilization Greater Than 50 worksheet4.conditional_format('E2:E8', {'type': 'cell', 'criteria': '>=', 'value': 30, 'format': format1}) ############################################ cursor.execute("SELECT Start, End, DAYNAME(Start) AS Weekday, ((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Hours, (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS PCT_Utilization FROM eBike ORDER BY (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 DESC") Utilization = cursor.fetchall() worksheet4.write('A11', 'Reservation Start', bold) worksheet4.write('B11', 'Reservation End', bold) worksheet4.write('C11', 'Weekday', bold) worksheet4.write('D11', 'Hours Reserved', bold) worksheet4.write('E11', 'Percent Utilization', bold) row += 3 col = 0 count = 12 for Start, End, Day, Hour, PCT_Utilization in (Utilization): worksheet4.write(row, col, Start) ########################## https://xlsxwriter.readthedocs.io/working_with_dates_and_time.html worksheet4.write(row, col + 1, End) ##### worksheet4.write(row, col + 2, Day) ##### worksheet4.write(row, col + 3, Hour) worksheet4.write(row, col + 4, PCT_Utilization) row += 1 if (PCT_Utilization > 95.0): count += 1 # Add Chart chart = workbook.add_chart({'type': 'column'}) # Add Data to Chart chart.add_series({ 'values': '=Utilization!$E$'+str(count)+':$E$'+str(len(Utilization)), 'fill': {'color': '#52B7CB'}, 'border': {'color': '#52B7CB'} }) count = 0 # Format 
Chart chart.set_title({ 'name': 'Percent Utilization', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB', }, }) chart.set_x_axis({ 'name': 'Reservation', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'name': 'Arial', 'color': '#52B7CB', }, }) chart.set_y_axis({ 'name': 'Percent Utilization', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'italic': True, 'color': '#52B7CB', }, }) # Remove Legend chart.set_legend({'position': 'none'}) # Insert Chart worksheet4.insert_chart('G4', chart) #### ### How far in advance reservations are created # How far in advance reservations are created cursor.execute("SELECT DATEDIFF(Start, Created) AS Days_Advance_Reservation, COUNT(*) AS Number_Reserved_Trips FROM eBike WHERE DATEDIFF(Start, Created) >= 0 GROUP BY Days_Advance_Reservation ORDER BY Days_Advance_Reservation DESC") Advance_Reservation = cursor.fetchall() # Worksheet Name worksheet5 = workbook.add_worksheet('Aggregate_Advance_Reservation') # Column Names worksheet5.write('A1', 'Days E-Bike was Reserved Ahead of Time', bold) worksheet5.write('B1', 'Total Reservations', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Days_Advance_Reservation, Number_Reserved_Trips in (Advance_Reservation): worksheet5.write(row, col, Days_Advance_Reservation) worksheet5.write(row, col + 1, Number_Reserved_Trips) row += 1 worksheet5.conditional_format('B2:B9999', {'type': 'cell', 'criteria': '>=', 'value': 5, 'format': format2}) # Time series of how far in advance reservations are created cursor.execute("SELECT Start, DATEDIFF(Start, Created) AS Days_Advance_Reservation FROM eBike WHERE DATEDIFF(Start, Created) > 0 ORDER BY Start ASC") Time_Series_Advance_Reservation = cursor.fetchall() Starts = [] for i in range(0, len(Time_Series_Advance_Reservation)): Starts.append(str(Time_Series_Advance_Reservation[i][0])) # Worksheet Name worksheet6 = 
workbook.add_worksheet('Time_Series_Advance_Reservation') # Column Names worksheet6.write('A1', 'Reservation Start Date', bold) worksheet6.write('B1', 'Days E-Bike was Reserved Ahead of Time', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for StartVal in Starts: worksheet6.write(row, col, StartVal) row += 1 row = 1 for Start, Days_Advance_Reservation in (Time_Series_Advance_Reservation): worksheet6.write(row, col + 1, Days_Advance_Reservation) row += 1 # Add Chart chart = workbook.add_chart({'type': 'line'}) worksheet6.conditional_format('B2:B9999', {'type': 'cell', 'criteria': '>=', 'value': 5, 'format': format2}) workbook.close() cursor.close() cnx.close() ```
[ { "content": "```python\n# Copyright 2016 AT&T Corp\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses...
[ { "content": "<|memory_start|>```python\n# Copyright 2016 AT&T Corp\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apa...
```python # Copyright 2016 AT&T Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from murano.api.middleware import context from murano.tests.unit import base from oslo_config import cfg CONF = cfg.CONF class MiddlewareContextTest(base.MuranoTestCase): def test_middleware_context_default(self): middleware = context.ContextMiddleware(None) request_headers = { 'X-Roles': 'admin', 'X-User-Id': "", 'X-Tenant-Id': "", 'X-Configuration-Session': "", } request = webob.Request.blank('/environments', headers=request_headers) self.assertFalse(hasattr(request, 'context')) middleware.process_request(request) self.assertTrue(hasattr(request, 'context')) def test_factory_returns_filter(self): middleware = context.ContextMiddleware(None) result = middleware.factory(CONF) self.assertIsNotNone(result) ```
[ { "content": "Output the full code verbatim (no extra comments):\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nRegression tests for the Test Client, especially the customized assertions.\n\"\"\"\nimport os\nfrom django.conf import settings\n\nfrom django.test import Client, TestCase\nfrom django.test.utils impor...
[ { "content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nRegression tests for the Test Client, especially the customized assertions.\n\"\"\"\nimport os\nfrom django.conf import settings\n\nfrom django.test import Client, TestCase\nfrom django....
```python # -*- coding: utf-8 -*- """ Regression tests for the Test Client, especially the customized assertions. """ import os from django.conf import settings from django.test import Client, TestCase from django.test.utils import ContextList from django.core.urlresolvers import reverse from django.core.exceptions import SuspiciousOperation from django.template import TemplateDoesNotExist, TemplateSyntaxError, Context from django.template import loader from django.test.client import encode_file class AssertContainsTests(TestCase): def setUp(self): self.old_templates = settings.TEMPLATE_DIRS settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),) def tearDown(self): settings.TEMPLATE_DIRS = self.old_templates def test_contains(self): "Responses can be inspected for content, including counting repeated substrings" response = self.client.get('/test_client_regress/no_template_view/') self.assertNotContains(response, 'never') self.assertContains(response, 'never', 0) self.assertContains(response, 'once') self.assertContains(response, 'once', 1) self.assertContains(response, 'twice') self.assertContains(response, 'twice', 2) try: self.assertContains(response, 'text', status_code=999) except AssertionError, e: self.assertEquals(str(e), "Couldn't retrieve content: Response code was 200 (expected 999)") try: self.assertContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Couldn't retrieve content: Response code was 200 (expected 999)") try: self.assertNotContains(response, 'text', status_code=999) except AssertionError, e: self.assertEquals(str(e), "Couldn't retrieve content: Response code was 200 (expected 999)") try: self.assertNotContains(response, 'text', status_code=999, msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Couldn't retrieve content: Response code was 200 (expected 999)") try: self.assertNotContains(response, 'once') except AssertionError, 
e: self.assertEquals(str(e), "Response should not contain 'once'") try: self.assertNotContains(response, 'once', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Response should not contain 'once'") try: self.assertContains(response, 'never', 1) except AssertionError, e: self.assertEquals(str(e), "Found 0 instances of 'never' in response (expected 1)") try: self.assertContains(response, 'never', 1, msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Found 0 instances of 'never' in response (expected 1)") try: self.assertContains(response, 'once', 0) except AssertionError, e: self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 0)") try: self.assertContains(response, 'once', 0, msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Found 1 instances of 'once' in response (expected 0)") try: self.assertContains(response, 'once', 2) except AssertionError, e: self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 2)") try: self.assertContains(response, 'once', 2, msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Found 1 instances of 'once' in response (expected 2)") try: self.assertContains(response, 'twice', 1) except AssertionError, e: self.assertEquals(str(e), "Found 2 instances of 'twice' in response (expected 1)") try: self.assertContains(response, 'twice', 1, msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Found 2 instances of 'twice' in response (expected 1)") try: self.assertContains(response, 'thrice') except AssertionError, e: self.assertEquals(str(e), "Couldn't find 'thrice' in response") try: self.assertContains(response, 'thrice', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Couldn't find 'thrice' in response") try: self.assertContains(response, 'thrice', 3) except AssertionError, e: self.assertEquals(str(e), "Found 0 instances of 'thrice' in response 
(expected 3)") try: self.assertContains(response, 'thrice', 3, msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Found 0 instances of 'thrice' in response (expected 3)") def test_unicode_contains(self): "Unicode characters can be found in template context" #Regression test for #10183 r = self.client.get('/test_client_regress/check_unicode/') self.assertContains(r, u'さかき') self.assertContains(r, '\xe5\xb3\xa0'.decode('utf-8')) def test_unicode_not_contains(self): "Unicode characters can be searched for, and not found in template context" #Regression test for #10183 r = self.client.get('/test_client_regress/check_unicode/') self.assertNotContains(r, u'はたけ') self.assertNotContains(r, '\xe3\x81\xaf\xe3\x81\x9f\xe3\x81\x91'.decode('utf-8')) class AssertTemplateUsedTests(TestCase): fixtures = ['testdata.json'] def test_no_context(self): "Template usage assertions work then templates aren't in use" response = self.client.get('/test_client_regress/no_template_view/') # Check that the no template case doesn't mess with the template assertions self.assertTemplateNotUsed(response, 'GET Template') try: self.assertTemplateUsed(response, 'GET Template') except AssertionError, e: self.assertEquals(str(e), "No templates used to render the response") try: self.assertTemplateUsed(response, 'GET Template', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: No templates used to render the response") def test_single_context(self): "Template assertions work when there is a single context" response = self.client.get('/test_client/post_view/', {}) try: self.assertTemplateNotUsed(response, 'Empty GET Template') except AssertionError, e: self.assertEquals(str(e), "Template 'Empty GET Template' was used unexpectedly in rendering the response") try: self.assertTemplateNotUsed(response, 'Empty GET Template', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Template 'Empty GET Template' was used unexpectedly in 
rendering the response") try: self.assertTemplateUsed(response, 'Empty POST Template') except AssertionError, e: self.assertEquals(str(e), "Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template") try: self.assertTemplateUsed(response, 'Empty POST Template', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template") def test_multiple_context(self): "Template assertions work when there are multiple contexts" post_data = { 'text': 'Hello World', 'email': 'foo@example.com', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view_with_template/', post_data) self.assertContains(response, 'POST data OK') try: self.assertTemplateNotUsed(response, "form_view.html") except AssertionError, e: self.assertEquals(str(e), "Template 'form_view.html' was used unexpectedly in rendering the response") try: self.assertTemplateNotUsed(response, 'base.html') except AssertionError, e: self.assertEquals(str(e), "Template 'base.html' was used unexpectedly in rendering the response") try: self.assertTemplateUsed(response, "Valid POST Template") except AssertionError, e: self.assertEquals(str(e), "Template 'Valid POST Template' was not a template used to render the response. 
Actual template(s) used: form_view.html, base.html") class AssertRedirectsTests(TestCase): def test_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/test_client/permanent_redirect_view/') try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError, e: self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)") try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 301 (expected 302)") def test_lost_query(self): "An assertion is raised if the redirect location doesn't preserve GET parameters" response = self.client.get('/test_client/redirect_view/', {'var': 'value'}) try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError, e: self.assertEquals(str(e), "Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'") try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'") def test_incorrect_target(self): "An assertion is raised if the response redirects to another target" response = self.client.get('/test_client/permanent_redirect_view/') try: # Should redirect to get_view self.assertRedirects(response, '/test_client/some_view/') except AssertionError, e: self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)") def test_target_page(self): "An assertion is raised if the response redirect target cannot be retrieved as expected" response = self.client.get('/test_client/double_redirect_view/') 
try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/') except AssertionError, e: self.assertEquals(str(e), "Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)") try: # The redirect target responds with a 301 code, not 200 self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)") def test_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get('/test_client_regress/redirects/further/more/', {}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', status_code=301, target_status_code=200) self.assertEquals(len(response.redirect_chain), 1) self.assertEquals(response.redirect_chain[0], ('http://testserver/test_client_regress/no_template_view/', 301)) def test_multiple_redirect_chain(self): "You can follow a redirect chain of multiple redirects" response = self.client.get('/test_client_regress/redirects/', {}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', status_code=301, target_status_code=200) self.assertEquals(len(response.redirect_chain), 3) self.assertEquals(response.redirect_chain[0], ('http://testserver/test_client_regress/redirects/further/', 301)) self.assertEquals(response.redirect_chain[1], ('http://testserver/test_client_regress/redirects/further/more/', 301)) self.assertEquals(response.redirect_chain[2], ('http://testserver/test_client_regress/no_template_view/', 301)) def test_redirect_chain_to_non_existent(self): "You can follow a chain to a non-existent view" response = self.client.get('/test_client_regress/redirect_to_non_existent_view2/', {}, follow=True) 
self.assertRedirects(response, '/test_client_regress/non_existent_view/', status_code=301, target_status_code=404) def test_redirect_chain_to_self(self): "Redirections to self are caught and escaped" response = self.client.get('/test_client_regress/redirect_to_self/', {}, follow=True) # The chain of redirects stops once the cycle is detected. self.assertRedirects(response, '/test_client_regress/redirect_to_self/', status_code=301, target_status_code=301) self.assertEquals(len(response.redirect_chain), 2) def test_circular_redirect(self): "Circular redirect chains are caught and escaped" response = self.client.get('/test_client_regress/circular_redirect_1/', {}, follow=True) # The chain of redirects will get back to the starting point, but stop there. self.assertRedirects(response, '/test_client_regress/circular_redirect_2/', status_code=301, target_status_code=301) self.assertEquals(len(response.redirect_chain), 4) def test_redirect_chain_post(self): "A redirect chain will be followed from an initial POST post" response = self.client.post('/test_client_regress/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEquals(len(response.redirect_chain), 3) def test_redirect_chain_head(self): "A redirect chain will be followed from an initial HEAD request" response = self.client.head('/test_client_regress/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEquals(len(response.redirect_chain), 3) def test_redirect_chain_options(self): "A redirect chain will be followed from an initial OPTIONS request" response = self.client.options('/test_client_regress/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEquals(len(response.redirect_chain), 3) def test_redirect_chain_put(self): "A redirect chain will 
be followed from an initial PUT request" response = self.client.put('/test_client_regress/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEquals(len(response.redirect_chain), 3) def test_redirect_chain_delete(self): "A redirect chain will be followed from an initial DELETE request" response = self.client.delete('/test_client_regress/redirects/', {'nothing': 'to_send'}, follow=True) self.assertRedirects(response, '/test_client_regress/no_template_view/', 301, 200) self.assertEquals(len(response.redirect_chain), 3) def test_redirect_chain_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/test_client/get_view/', follow=True) try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError, e: self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 200 (expected 302)") try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 200 (expected 302)") def test_redirect_on_non_redirect_page(self): "An assertion is raised if the original page couldn't be retrieved as expected" # This page will redirect with code 301, not 302 response = self.client.get('/test_client/get_view/') try: self.assertRedirects(response, '/test_client/get_view/') except AssertionError, e: self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 200 (expected 302)") try: self.assertRedirects(response, '/test_client/get_view/', msg_prefix='abc') except AssertionError, e: self.assertEquals(str(e), "abc: Response didn't redirect as expected: Response code was 200 (expected 302)") class AssertFormErrorTests(TestCase): def test_unknown_form(self): "An assertion is raised if 
the form name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.') except AssertionError, e: self.assertEqual(str(e), "The form 'wrong_form' was not used to render the response") try: self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError, e: self.assertEqual(str(e), "abc: The form 'wrong_form' was not used to render the response") def test_unknown_field(self): "An assertion is raised if the field name is unknown" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'some_field', 'Some error.') except AssertionError, e: self.assertEqual(str(e), "The form 'form' in context 0 does not contain the field 'some_field'") try: self.assertFormError(response, 'form', 'some_field', 'Some error.', msg_prefix='abc') except AssertionError, e: self.assertEqual(str(e), "abc: The form 'form' in context 0 does not contain the field 'some_field'") def test_noerror_field(self): "An assertion is raised if the field doesn't have any errors" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'value', 'Some error.') except AssertionError, e: 
self.assertEqual(str(e), "The field 'value' on form 'form' in context 0 contains no errors") try: self.assertFormError(response, 'form', 'value', 'Some error.', msg_prefix='abc') except AssertionError, e: self.assertEqual(str(e), "abc: The field 'value' on form 'form' in context 0 contains no errors") def test_unknown_error(self): "An assertion is raised if the field doesn't contain the provided error" post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', 'email', 'Some error.') except AssertionError, e: self.assertEqual(str(e), "The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])") try: self.assertFormError(response, 'form', 'email', 'Some error.', msg_prefix='abc') except AssertionError, e: self.assertEqual(str(e), "abc: The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])") def test_unknown_nonfield_error(self): """ Checks that an assertion is raised if the form's non field errors doesn't contain the provided error. """ post_data = { 'text': 'Hello World', 'email': 'not an email address', 'value': 37, 'single': 'b', 'multi': ('b','c','e') } response = self.client.post('/test_client/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Invalid POST Template") try: self.assertFormError(response, 'form', None, 'Some error.') except AssertionError, e: self.assertEqual(str(e), "The form 'form' in context 0 does not contain the non-field error 'Some error.' 
(actual errors: )") try: self.assertFormError(response, 'form', None, 'Some error.', msg_prefix='abc') except AssertionError, e: self.assertEqual(str(e), "abc: The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )") class LoginTests(TestCase): fixtures = ['testdata'] def test_login_different_client(self): "Check that using a different test client doesn't violate authentication" # Create a second client, and log in. c = Client() login = c.login(username='testclient', password='password') self.failUnless(login, 'Could not log in') # Get a redirection page with the second client. response = c.get("/test_client_regress/login_protected_redirect_view/") # At this points, the self.client isn't logged in. # Check that assertRedirects uses the original client, not the # default client. self.assertRedirects(response, "http://testserver/test_client_regress/get_view/") class SessionEngineTests(TestCase): fixtures = ['testdata'] def setUp(self): self.old_SESSION_ENGINE = settings.SESSION_ENGINE settings.SESSION_ENGINE = 'regressiontests.test_client_regress.session' def tearDown(self): settings.SESSION_ENGINE = self.old_SESSION_ENGINE def test_login(self): "A session engine that modifies the session key can be used to log in" login = self.client.login(username='testclient', password='password') self.failUnless(login, 'Could not log in') # Try to access a login protected page. 
response = self.client.get("/test_client/login_protected_view/") self.assertEqual(response.status_code, 200) self.assertEqual(response.context['user'].username, 'testclient') class URLEscapingTests(TestCase): def test_simple_argument_get(self): "Get a view that has a simple string argument" response = self.client.get(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'Howdy, Slartibartfast') def test_argument_with_space_get(self): "Get a view that has a string argument that requires escaping" response = self.client.get(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'Hi, Arthur') def test_simple_argument_post(self): "Post for a view that has a simple string argument" response = self.client.post(reverse('arg_view', args=['Slartibartfast'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'Howdy, Slartibartfast') def test_argument_with_space_post(self): "Post for a view that has a string argument that requires escaping" response = self.client.post(reverse('arg_view', args=['Arthur Dent'])) self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'Hi, Arthur') class ExceptionTests(TestCase): fixtures = ['testdata.json'] def test_exception_cleared(self): "#5836 - A stale user exception isn't re-raised by the test client." login = self.client.login(username='testclient',password='password') self.failUnless(login, 'Could not log in') try: response = self.client.get("/test_client_regress/staff_only/") self.fail("General users should not be able to visit this page") except SuspiciousOperation: pass # At this point, an exception has been raised, and should be cleared. # This next operation should be successful; if it isn't we have a problem. 
login = self.client.login(username='staff', password='password') self.failUnless(login, 'Could not log in') try: self.client.get("/test_client_regress/staff_only/") except SuspiciousOperation: self.fail("Staff should be able to visit this page") class TemplateExceptionTests(TestCase): def setUp(self): # Reset the loaders so they don't try to render cached templates. if loader.template_source_loaders is not None: for template_loader in loader.template_source_loaders: if hasattr(template_loader, 'reset'): template_loader.reset() self.old_templates = settings.TEMPLATE_DIRS settings.TEMPLATE_DIRS = () def tearDown(self): settings.TEMPLATE_DIRS = self.old_templates def test_no_404_template(self): "Missing templates are correctly reported by test client" try: response = self.client.get("/no_such_view/") self.fail("Should get error about missing template") except TemplateDoesNotExist: pass def test_bad_404_template(self): "Errors found when rendering 404 error templates are re-raised" settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'bad_templates'),) try: response = self.client.get("/no_such_view/") self.fail("Should get error about syntax error in template") except TemplateSyntaxError: pass # We need two different tests to check URLconf substitution - one to check # it was changed, and another one (without self.urls) to check it was reverted on # teardown. This pair of tests relies upon the alphabetical ordering of test execution. class UrlconfSubstitutionTests(TestCase): urls = 'regressiontests.test_client_regress.urls' def test_urlconf_was_changed(self): "TestCase can enforce a custom URLconf on a per-test basis" url = reverse('arg_view', args=['somename']) self.assertEquals(url, '/arg_view/somename/') # This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the # name is to ensure alphabetical ordering. 
class zzUrlconfSubstitutionTests(TestCase): def test_urlconf_was_reverted(self): "URLconf is reverted to original value after modification in a TestCase" url = reverse('arg_view', args=['somename']) self.assertEquals(url, '/test_client_regress/arg_view/somename/') class ContextTests(TestCase): fixtures = ['testdata'] def test_single_context(self): "Context variables can be retrieved from a single context" response = self.client.get("/test_client_regress/request_data/", data={'foo':'whiz'}) self.assertEqual(response.context.__class__, Context) self.assertTrue('get-foo' in response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') self.assertEqual(response.context['data'], 'sausage') try: response.context['does-not-exist'] self.fail('Should not be able to retrieve non-existent key') except KeyError, e: self.assertEquals(e.args[0], 'does-not-exist') def test_inherited_context(self): "Context variables can be retrieved from a list of contexts" response = self.client.get("/test_client_regress/request_data_extended/", data={'foo':'whiz'}) self.assertEqual(response.context.__class__, ContextList) self.assertEqual(len(response.context), 2) self.assertTrue('get-foo' in response.context) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') self.assertEqual(response.context['data'], 'bacon') try: response.context['does-not-exist'] self.fail('Should not be able to retrieve non-existent key') except KeyError, e: self.assertEquals(e.args[0], 'does-not-exist') class SessionTests(TestCase): fixtures = ['testdata.json'] def test_session(self): "The session isn't lost if a user logs in" # The session doesn't exist to start. response = self.client.get('/test_client_regress/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'NO') # This request sets a session variable. 
response = self.client.get('/test_client_regress/set_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'set_session') # Check that the session has been modified response = self.client.get('/test_client_regress/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'YES') # Log in login = self.client.login(username='testclient',password='password') self.failUnless(login, 'Could not log in') # Session should still contain the modified value response = self.client.get('/test_client_regress/check_session/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'YES') def test_logout(self): """Logout should work whether the user is logged in or not (#9978).""" self.client.logout() login = self.client.login(username='testclient',password='password') self.failUnless(login, 'Could not log in') self.client.logout() self.client.logout() class RequestMethodTests(TestCase): def test_get(self): "Request a view via request method GET" response = self.client.get('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'request method: GET') def test_post(self): "Request a view via request method POST" response = self.client.post('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'request method: POST') def test_head(self): "Request a view via request method HEAD" response = self.client.head('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) # A HEAD request doesn't return any content. 
self.assertNotEqual(response.content, 'request method: HEAD') self.assertEqual(response.content, '') def test_options(self): "Request a view via request method OPTIONS" response = self.client.options('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'request method: OPTIONS') def test_put(self): "Request a view via request method PUT" response = self.client.put('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'request method: PUT') def test_delete(self): "Request a view via request method DELETE" response = self.client.delete('/test_client_regress/request_methods/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'request method: DELETE') class RequestMethodStringDataTests(TestCase): def test_post(self): "Request a view with string data via request method POST" # Regression test for #11371 data = u'{"test": "json"}' response = self.client.post('/test_client_regress/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'request method: POST') def test_put(self): "Request a view with string data via request method PUT" # Regression test for #11371 data = u'{"test": "json"}' response = self.client.put('/test_client_regress/request_methods/', data=data, content_type='application/json') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, 'request method: PUT') class QueryStringTests(TestCase): def test_get_like_requests(self): for method_name in ('get','head','options','put','delete'): # A GET-like request can pass a query string as data method = getattr(self.client, method_name) response = method("/test_client_regress/request_data/", data={'foo':'whiz'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') # A GET-like request 
can pass a query string as part of the URL response = method("/test_client_regress/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['request-foo'], 'whiz') # Data provided in the URL to a GET-like request is overridden by actual form data response = method("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'}) self.assertEqual(response.context['get-foo'], 'bang') self.assertEqual(response.context['request-foo'], 'bang') response = method("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'}) self.assertEqual(response.context['get-foo'], None) self.assertEqual(response.context['get-bar'], 'bang') self.assertEqual(response.context['request-foo'], None) self.assertEqual(response.context['request-bar'], 'bang') def test_post_like_requests(self): # A POST-like request can pass a query string as data response = self.client.post("/test_client_regress/request_data/", data={'foo':'whiz'}) self.assertEqual(response.context['get-foo'], None) self.assertEqual(response.context['post-foo'], 'whiz') # A POST-like request can pass a query string as part of the URL response = self.client.post("/test_client_regress/request_data/?foo=whiz") self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['post-foo'], None) self.assertEqual(response.context['request-foo'], 'whiz') # POST data provided in the URL augments actual form data response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'foo':'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['post-foo'], 'bang') self.assertEqual(response.context['request-foo'], 'bang') response = self.client.post("/test_client_regress/request_data/?foo=whiz", data={'bar':'bang'}) self.assertEqual(response.context['get-foo'], 'whiz') self.assertEqual(response.context['get-bar'], None) self.assertEqual(response.context['post-foo'], None) 
self.assertEqual(response.context['post-bar'], 'bang') self.assertEqual(response.context['request-foo'], 'whiz') self.assertEqual(response.context['request-bar'], 'bang') class UnicodePayloadTests(TestCase): def test_simple_unicode_payload(self): "A simple ASCII-only unicode JSON document can be POSTed" # Regression test for #10571 json = u'{"english": "mountain pass"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json") self.assertEqual(response.content, json) def test_unicode_payload_utf8(self): "A non-ASCII unicode data encoded as UTF-8 can be POSTed" # Regression test for #10571 json = u'{"dog": "собака"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json; charset=utf-8") self.assertEqual(response.content, json.encode('utf-8')) def test_unicode_payload_utf16(self): "A non-ASCII unicode data encoded as UTF-16 can be POSTed" # Regression test for #10571 json = u'{"dog": "собака"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json; charset=utf-16") self.assertEqual(response.content, json.encode('utf-16')) def test_unicode_payload_non_utf(self): "A non-ASCII unicode data as a non-UTF based encoding can be POSTed" #Regression test for #10571 json = u'{"dog": "собака"}' response = self.client.post("/test_client_regress/parse_unicode_json/", json, content_type="application/json; charset=koi8-r") self.assertEqual(response.content, json.encode('koi8-r')) class DummyFile(object): def __init__(self, filename): self.name = filename def read(self): return 'TEST_FILE_CONTENT' class UploadedFileEncodingTest(TestCase): def test_file_encoding(self): encoded_file = encode_file('TEST_BOUNDARY', 'TEST_KEY', DummyFile('test_name.bin')) self.assertEqual('--TEST_BOUNDARY', encoded_file[0]) self.assertEqual('Content-Disposition: form-data; name="TEST_KEY"; filename="test_name.bin"', encoded_file[1]) 
self.assertEqual('TEST_FILE_CONTENT', encoded_file[-1]) def test_guesses_content_type_on_file_encoding(self): self.assertEqual('Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.bin"))[2]) self.assertEqual('Content-Type: text/plain', encode_file('IGNORE', 'IGNORE', DummyFile("file.txt"))[2]) self.assertEqual('Content-Type: application/zip', encode_file('IGNORE', 'IGNORE', DummyFile("file.zip"))[2]) self.assertEqual('Content-Type: application/octet-stream', encode_file('IGNORE', 'IGNORE', DummyFile("file.unknown"))[2]) ```
[ { "content": "```python\n# This file is part of Py6S.\n#\n# Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file.\n#\n# Py6S is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundat...
[ { "content": "<|memory_start|>```python\n# This file is part of Py6S.\n#\n# Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file.\n#\n# Py6S is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free ...
```python # This file is part of Py6S. # # Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file. # # Py6S is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Py6S is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Py6S. If not, see <http://www.gnu.org/licenses/>. import unittest from Py6S import AeroProfile, AtmosProfile, ParameterError, SixS class AtmosProfileTests(unittest.TestCase): def test_atmos_profile(self): aps = [ AtmosProfile.Tropical, AtmosProfile.NoGaseousAbsorption, AtmosProfile.UserWaterAndOzone(0.9, 3), ] results = [0.2723143, 0.2747224, 0.2476101] for i in range(len(aps)): s = SixS() s.atmos_profile = aps[i] s.run() self.assertAlmostEqual( s.outputs.apparent_reflectance, results[i], msg="Error in atmos profile with ID %s. Got %f, expected %f." % (str(aps[i]), s.outputs.apparent_reflectance, results[i]), delta=0.002, ) def test_from_lat_and_date(self): ap = AtmosProfile.FromLatitudeAndDate(53, "2015-07-14") assert ap == AtmosProfile.PredefinedType(AtmosProfile.SubarcticSummer) class AeroProfileTests(unittest.TestCase): def test_aero_profile(self): user_ap = AeroProfile.UserProfile(AeroProfile.Maritime) user_ap.add_layer(5, 0.34) aps = [ AeroProfile.Continental, AeroProfile.NoAerosols, AeroProfile.User(dust=0.3, oceanic=0.7), user_ap, ] results = [122.854, 140.289, 130.866, 136.649] for i in range(len(aps)): s = SixS() s.aero_profile = aps[i] s.run() self.assertAlmostEqual( s.outputs.apparent_radiance, results[i], "Error in aerosol profile with ID %s. Got %f, expected %f." 
% (str(aps[i]), s.outputs.apparent_radiance, results[i]), delta=0.002, ) def test_aero_profile_errors(self): with self.assertRaises(ParameterError): AeroProfile.User(dust=0.8, oceanic=0.4) def test_sun_photo_dist_errors1(self): with self.assertRaises(ParameterError): # Different numbers of elements for first two arguments AeroProfile.SunPhotometerDistribution( [ 0.050000001, 0.065604001, 0.086076997, 0.112939, 0.148184001, 0.194428995, 0.255104989, 0.334715992, 0.439173013, 0.576227009, 0.756052017, 0.99199599, 1.30157101, 1.707757, 2.24070191, 2.93996596, 3.85745192, 5.06126022, 6.64074516, 8.71314526, ], [ 0.001338098, 0.007492487, 0.026454749, 0.058904506, 0.082712278, 0.073251031, 0.040950641, 0.014576218, 0.003672085, 0.001576356, 0.002422644, 0.004472982, 0.007452302, 0.011037065, 0.014523974, 0.016981738, 0.017641816, 0.016284294, 0.01335547, 0.009732267, 0.006301342, 0.003625077, ], [1.47] * 20, [0.0093] * 20, ) def test_sun_photo_dist_errors2(self): with self.assertRaises(ParameterError): # Different numbers of elements for first two arguments AeroProfile.SunPhotometerDistribution( [ 0.050000001, 0.065604001, 0.086076997, 0.112939, 0.148184001, 0.194428995, 0.255104989, 0.334715992, 0.439173013, 0.576227009, 0.756052017, 0.99199599, 1.30157101, 1.707757, 2.24070191, 2.93996596, 3.85745192, 5.06126022, 6.64074516, 8.71314526, 11.4322901, 15, ], [ 0.001338098, 0.007492487, 0.026454749, 0.058904506, 0.082712278, 0.073251031, 0.040950641, 0.014576218, 0.003672085, 0.001576356, 0.002422644, 0.004472982, 0.007452302, 0.011037065, 0.014523974, 0.016981738, 0.017641816, 0.016284294, 0.01335547, 0.009732267, 0.006301342, 0.003625077, ], [1.47] * 15, [0.0093] * 20, ) def test_sun_photo_dist_errors3(self): # Different numbers of elements for first two arguments ap1 = AeroProfile.SunPhotometerDistribution( [ 0.050000001, 0.065604001, 0.086076997, 0.112939, 0.148184001, 0.194428995, 0.255104989, 0.334715992, 0.439173013, 0.576227009, 0.756052017, 0.99199599, 
1.30157101, 1.707757, 2.24070191, 2.93996596, 3.85745192, 5.06126022, 6.64074516, 8.71314526, 11.4322901, 15, ], [ 0.001338098, 0.007492487, 0.026454749, 0.058904506, 0.082712278, 0.073251031, 0.040950641, 0.014576218, 0.003672085, 0.001576356, 0.002422644, 0.004472982, 0.007452302, 0.011037065, 0.014523974, 0.016981738, 0.017641816, 0.016284294, 0.01335547, 0.009732267, 0.006301342, 0.003625077, ], [1.47] * 20, [2.3] * 20, ) ap2 = AeroProfile.SunPhotometerDistribution( [ 0.050000001, 0.065604001, 0.086076997, 0.112939, 0.148184001, 0.194428995, 0.255104989, 0.334715992, 0.439173013, 0.576227009, 0.756052017, 0.99199599, 1.30157101, 1.707757, 2.24070191, 2.93996596, 3.85745192, 5.06126022, 6.64074516, 8.71314526, 11.4322901, 15, ], [ 0.001338098, 0.007492487, 0.026454749, 0.058904506, 0.082712278, 0.073251031, 0.040950641, 0.014576218, 0.003672085, 0.001576356, 0.002422644, 0.004472982, 0.007452302, 0.011037065, 0.014523974, 0.016981738, 0.017641816, 0.016284294, 0.01335547, 0.009732267, 0.006301342, 0.003625077, ], 1.47, 2.3, ) self.assertEqual(ap1, ap2) def test_multimodal_dist_errors1(self): with self.assertRaises(ParameterError): ap = AeroProfile.MultimodalLogNormalDistribution(0.001, 20) # Add > 4 components ap.add_component( 0.05, 2.03, 0.538, [ 1.508, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.495, 1.490, 1.490, 1.490, 1.486, 1.480, 1.470, 1.460, 1.456, 1.443, 1.430, 1.470, ], [ 3.24e-07, 3.0e-08, 2.86e-08, 2.51e-08, 2.2e-08, 2.0e-08, 1.0e-08, 1.0e-08, 1.48e-08, 2.0e-08, 6.85e-08, 1.0e-07, 1.25e-06, 3.0e-06, 3.5e-04, 6.0e-04, 6.86e-04, 1.7e-03, 4.0e-03, 1.4e-03, ], ) ap.add_component( 0.0695, 2.03, 0.457, [ 1.452, 1.440, 1.438, 1.433, 1.432, 1.431, 1.431, 1.430, 1.429, 1.429, 1.429, 1.428, 1.427, 1.425, 1.411, 1.401, 1.395, 1.385, 1.364, 1.396, ], [ 1.0e-08, 1.0e-08, 1.0e-08, 1.0e-08, 1.0e-08, 1.0e-08, 1.0e-08, 1.0e-08, 1.38e-08, 1.47e-08, 1.68e-08, 1.93e-08, 4.91e-08, 1.75e-07, 9.66e-06, 1.94e-04, 3.84e-04, 1.12e-03, 2.51e-03, 1.31e-01, ], ) 
ap.add_component( 0.4, 2.03, 0.005, [ 1.508, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.495, 1.490, 1.490, 1.490, 1.486, 1.480, 1.470, 1.460, 1.456, 1.443, 1.430, 1.470, ], [ 3.24e-07, 3.0e-08, 2.86e-08, 2.51e-08, 2.2e-08, 2.0e-08, 1.0e-08, 1.0e-08, 1.48e-08, 2.0e-08, 6.85e-08, 1.0e-07, 1.25e-06, 3.0e-06, 3.5e-04, 6.0e-04, 6.86e-04, 1.7e-03, 4.0e-03, 1.4e-03, ], ) ap.add_component( 0.4, 2.03, 0.005, [ 1.508, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.495, 1.490, 1.490, 1.490, 1.486, 1.480, 1.470, 1.460, 1.456, 1.443, 1.430, 1.470, ], [ 3.24e-07, 3.0e-08, 2.86e-08, 2.51e-08, 2.2e-08, 2.0e-08, 1.0e-08, 1.0e-08, 1.48e-08, 2.0e-08, 6.85e-08, 1.0e-07, 1.25e-06, 3.0e-06, 3.5e-04, 6.0e-04, 6.86e-04, 1.7e-03, 4.0e-03, 1.4e-03, ], ) ap.add_component( 0.4, 2.03, 0.005, [ 1.508, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.495, 1.490, 1.490, 1.490, 1.486, 1.480, 1.470, 1.460, 1.456, 1.443, 1.430, 1.470, ], [ 3.24e-07, 3.0e-08, 2.86e-08, 2.51e-08, 2.2e-08, 2.0e-08, 1.0e-08, 1.0e-08, 1.48e-08, 2.0e-08, 6.85e-08, 1.0e-07, 1.25e-06, 3.0e-06, 3.5e-04, 6.0e-04, 6.86e-04, 1.7e-03, 4.0e-03, 1.4e-03, ], ) def test_multimodal_dist_errors2(self): with self.assertRaises(ParameterError): ap = AeroProfile.MultimodalLogNormalDistribution(0.001, 20) ap.add_component( 0.05, 2.03, 0.538, [ 1.508, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.495, 1.490, 1.490, 1.490, 1.486, 1.480, 1.470, 1.460, 1.456, 1.443, 1.430, 1.470, ], [ 3.24e-07, 3.0e-08, 2.86e-08, 2.51e-08, 2.2e-08, 2.0e-08, 1.0e-08, 1.0e-08, 1.48e-08, 2.0e-08, 6.85e-08, 1.0e-07, 1.25e-06, 3.0e-06, 3.5e-04, 6.0e-04, 6.86e-04, ], ) def test_multimodal_dist_errors3(self): with self.assertRaises(ParameterError): ap = AeroProfile.MultimodalLogNormalDistribution(0.001, 20) ap.add_component( 0.4, 2.03, 0.005, [ 1.508, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.500, 1.495, 1.490, 1.490, 1.490, 1.486, 1.480, 1.470, 1.460, 1.456, 1.443, 1.430, 1.470, 1.999, 1.999, 0, ], [ 3.24e-07, 3.0e-08, 2.86e-08, 2.51e-08, 
2.2e-08, 2.0e-08, 1.0e-08, 1.0e-08, 1.48e-08, 2.0e-08, 6.85e-08, 1.0e-07, 1.25e-06, 3.0e-06, 3.5e-04, 6.0e-04, 6.86e-04, 1.7e-03, 4.0e-03, 1.4e-03, ], ) def test_running_multiple_add_components(self): s = SixS() real_intp = [0.0] * 20 imag_intp = [0.0] * 20 # Running these lines more than twice used to give an error for i in range(4): s.aeroprofile = AeroProfile.MultimodalLogNormalDistribution(0.085, 2.9) s.aeroprofile.add_component( rmean=2.65, sigma=0.62, percentage_density=0.093, refr_real=real_intp, refr_imag=imag_intp, ) ```
[ { "content": "Produce an exact reconstruction of the code:\n```python\n#\n# Copyright 2010-2011 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of th...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n#\n# Copyright 2010-2011 Red Hat, Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either...
```python # # Copyright 2010-2011 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # import utils import glob import os import tempfile import logging import hashlib from constants import P_VDSM_HOOKS, P_VDSM class HookError(Exception): pass def _scriptsPerDir(dir): return [ s for s in glob.glob(P_VDSM_HOOKS + dir + '/*') if os.access(s, os.X_OK) ] def _runHooksDir(domxml, dir, vmconf={}, raiseError=True): scripts = _scriptsPerDir(dir) scripts.sort() if not scripts: return domxml xmlfd, xmlname = tempfile.mkstemp() try: os.write(xmlfd, domxml or '') os.close(xmlfd) scriptenv = os.environ.copy() scriptenv.update(vmconf.get('custom', {})) if vmconf.get('vmId'): scriptenv['vmId'] = vmconf.get('vmId') ppath = scriptenv.get('PYTHONPATH', '') scriptenv['PYTHONPATH'] = ':'.join(ppath.split(':') + [P_VDSM]) scriptenv['_hook_domxml'] = xmlname for k, v in scriptenv.iteritems(): scriptenv[k] = unicode(v).encode('utf-8') errorSeen = False for s in scripts: rc, out, err = utils.execCmd([s], sudo=False, raw=True, env=scriptenv) logging.info(err) if rc != 0: errorSeen = True if rc == 2: break elif rc > 2: logging.warn('hook returned unexpected return code %s', rc) if errorSeen and raiseError: raise HookError() finalxml = file(xmlname).read() finally: 
os.unlink(xmlname) return finalxml def before_vm_start(domxml, vmconf={}): return _runHooksDir(domxml, 'before_vm_start', vmconf=vmconf) def after_vm_start(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_start', vmconf=vmconf, raiseError=False) def before_vm_cont(domxml, vmconf={}): return _runHooksDir(domxml, 'before_vm_cont', vmconf=vmconf) def after_vm_cont(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_cont', vmconf=vmconf, raiseError=False) def before_vm_pause(domxml, vmconf={}): return _runHooksDir(domxml, 'before_vm_pause', vmconf=vmconf) def after_vm_pause(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_pause', vmconf=vmconf, raiseError=False) def before_vm_migrate_source(domxml, vmconf={}): return _runHooksDir(domxml, 'before_vm_migrate_source', vmconf=vmconf) def after_vm_migrate_source(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_migrate_source', vmconf=vmconf, raiseError=False) def before_vm_migrate_destination(domxml, vmconf={}): return _runHooksDir(domxml, 'before_vm_migrate_destination', vmconf=vmconf) def after_vm_migrate_destination(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_migrate_destination', vmconf=vmconf, raiseError=False) def before_vm_hibernate(domxml, vmconf={}): return _runHooksDir(domxml, 'before_vm_hibernate', vmconf=vmconf) def after_vm_hibernate(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_hibernate', vmconf=vmconf, raiseError=False) def before_vm_dehibernate(domxml, vmconf={}): return _runHooksDir(domxml, 'before_vm_dehibernate', vmconf=vmconf) def after_vm_dehibernate(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_hibernate', vmconf=vmconf, raiseError=False) def after_vm_destroy(domxml, vmconf={}): return _runHooksDir(domxml, 'after_vm_destroy', vmconf=vmconf, raiseError=False) def before_vdsm_start(): return _runHooksDir(None, 'before_vdsm_start', raiseError=False) def after_vdsm_stop(): return _runHooksDir(None, 'after_vdsm_stop', raiseError=False) 
def _getScriptInfo(path): try: with file(path) as f: md5 = hashlib.md5(f.read()).hexdigest() except: md5 = '' return {'md5': md5} def _getHookInfo(dir): def scripthead(script): return script[len(P_VDSM_HOOKS) + len(dir) + 1:] return dict([ (scripthead(script), _getScriptInfo(script)) for script in _scriptsPerDir(dir) ]) def installed(): res = {} for dir in os.listdir(P_VDSM_HOOKS): inf = _getHookInfo(dir) if inf: res[dir] = inf return res if __name__ == '__main__': import sys def usage(): print 'Usage: %s hook_name' % sys.argv[0] sys.exit(1) if len(sys.argv) >= 2: globals()[sys.argv[1]](*sys.argv[2:]) else: usage() ```
[ { "content": "Repeat the code precisely:\n```python\n__author__ = 'dev'\n\nimport re\nfrom http.server import SimpleHTTPRequestHandler\n\nHEADERS_HOST_PARAMETER_KEY_NAME = \"Host\"\nREQUEST_LINE_ENCODING = \"iso-8859-1\"\nHOST_PATTERN_KEY_NAME = \"host_pattern\"\nRESPONSE_CONTENT_PATH_KEY_NAME = \"response_cont...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n__author__ = 'dev'\n\nimport re\nfrom http.server import SimpleHTTPRequestHandler\n\nHEADERS_HOST_PARAMETER_KEY_NAME = \"Host\"\nREQUEST_LINE_ENCODING = \"iso-8859-1\"\nHOST_PATTERN_KEY_NAME = \"host_pattern\"\nRESPONSE_CONTENT_PATH_KEY_NAME =...
```python __author__ = 'dev' import re from http.server import SimpleHTTPRequestHandler HEADERS_HOST_PARAMETER_KEY_NAME = "Host" REQUEST_LINE_ENCODING = "iso-8859-1" HOST_PATTERN_KEY_NAME = "host_pattern" RESPONSE_CONTENT_PATH_KEY_NAME = "response_content_path" class FakeHTTPRequestHandler(SimpleHTTPRequestHandler): def do_GET(self): self.__set_path_setting() SimpleHTTPRequestHandler.do_GET(self) def do_HEAD(self): self.__set_path_setting() SimpleHTTPRequestHandler.do_HEAD(self) def do_POST(self): self.command = "GET" self.do_GET() def __set_path_setting(self): response_content_path = None if self.server.requests_config is not None: server_path = self.__get_server_path() if server_path in self.server.requests_config: request_config = self.server.requests_config[server_path] response_content_path = self.__get_response_content_path(request_config) if response_content_path is not None: self.path = response_content_path else: self.path = "/404" def __get_response_content_path(self, request_config): sorted_configs = sorted(request_config, key=len, reverse=True) server_host = self.__get_server_host() for config in sorted_configs: if HOST_PATTERN_KEY_NAME in config: result = re.search(config[HOST_PATTERN_KEY_NAME], server_host) if result is None: continue if RESPONSE_CONTENT_PATH_KEY_NAME in config: return config[RESPONSE_CONTENT_PATH_KEY_NAME] return None def __get_server_path(self): request_line = str(self.raw_requestline, REQUEST_LINE_ENCODING).rstrip('\r\n') words = request_line.split() if len(words) < 2: return "" return words[1] def __get_server_host(self): return self.headers[HEADERS_HOST_PARAMETER_KEY_NAME] ```
[ { "content": "Here is a code file:\n```python\n# Author: Sari Sabban\n# Email: sari.sabban@gmail.com\n# URL: https://github.com/sarisabban\n#\n# Created By: \tSari Sabban\n# Created Date: \t20 March 2017\n\nimport urllib\n\ndef ProtVR(x):\n\tlis=list() \n\tfilename='HELLO C '\n\tfilename=urllib.urlopen('h...
[ { "content": "Here is a code file:\n<|memory_start|>```python\n# Author: Sari Sabban\n# Email: sari.sabban@gmail.com\n# URL: https://github.com/sarisabban\n#\n# Created By: \tSari Sabban\n# Created Date: \t20 March 2017\n\nimport urllib\n\ndef ProtVR(x):\n\tlis=list() \n\tfilename='HELLO C '\n\tfilename=u...
```python # Author: Sari Sabban # Email: sari.sabban@gmail.com # URL: https://github.com/sarisabban # # Created By: Sari Sabban # Created Date: 20 March 2017 import urllib def ProtVR(x): lis=list() filename='HELLO C ' filename=urllib.urlopen('http://files.rcsb.org/view/'+x+'.pdb') lis.append('<script src="/static/aframe.min.js"></script>\n') lis.append('<a-scene>\n') lis.append('\t<a-sky color="#111111"></a-sky>\n') for line in filename: line=line.decode() if line.startswith('ATOM'): splitline=line.split() try: coordinates=(splitline[11],splitline[6],splitline[7],splitline[8]) except: coordinates=(splitline[10],splitline[6],splitline[7],splitline[8]) if coordinates[0]=='N': js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2D2DE1"></a-sphere>' elif coordinates[0]=='C': js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2DE12D"></a-sphere>' elif coordinates[0]=='O': js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#E14343"></a-sphere>' elif coordinates[0]=='H': js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBCBCB"></a-sphere>' elif coordinates[0]=='S': js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBAE38"></a-sphere>' elif coordinates[0]=='I': js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#830083"></a-sphere>' else: js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#6F6F6F"></a-sphere>' result=' '.join(js) lis.append(result) lis.append('</a-scene>') final=' '.join(lis) return(final) #print(final) #ProtVR('2HIU') ```
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# Copyright 2016 Raytheon BBN Technologies\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ...
[ { "content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# Copyright 2016 Raytheon BBN Technologies\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the Li...
```python # Copyright 2016 Raytheon BBN Technologies # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 __all__ = ['DPO72004C','DPO2024','DPO2014','RSA3308A'] from auspex.log import logger from .instrument import SCPIInstrument, Command, StringCommand, BoolCommand, FloatCommand, IntCommand, is_valid_ipv4 import numpy as np class _TekDPscope(SCPIInstrument): """Tektronix DP Oscilloscope Base Class""" encoding = StringCommand(get_string="DAT:ENC?;", set_string="DAT:ENC {:s};", allowed_values=["ASCI","RIB","RPB","FPB","SRI","SRP","SFP"]) byte_depth = IntCommand(get_string="WFMOutpre:BYT_Nr?;", set_string="WFMOutpre:BYT_Nr {:d};", allowed_values=[1,2,4,8]) data_start = IntCommand(get_string="DAT:STAR?;", set_string="DAT:STAR {:d};") data_stop = IntCommand(get_string="DAT:STOP?;", set_string="DAT:STOP {:d};") preamble = StringCommand(get_string="WFMOutpre?;") # Curve preamble record_length = IntCommand(get_string="HOR:ACQLENGTH?;") record_rate = FloatCommand(get_string="HOR:SAMPLER?;") button_press = StringCommand(set_string="FPAnel:PRESS {:s};", allowed_values=["RUnstop", "SINGleseq"]) def clear(self): self.interface.write("CLEAR ALL;") def snap(self): """Sets the start and stop points to the the current front panel display. 
This doesn't actually seem to work, strangely.""" self.interface.write("DAT SNAp;") def get_curve(self, channel=1, byte_depth=2): channel_string = "CH{:d}".format(channel) self.interface.write("DAT:SOU {:s};".format(channel_string)) #self.source_channel = 1 self.encoding = "SRI" # Signed ints record_length = self.record_length self.data_start = 1 self.data_stop = record_length self.byte_depth = byte_depth strf_from_depth = {1: 'b', 2: 'h', 4: 'l', 8: 'q'} curve = self.interface.query_binary_values("CURVe?;", datatype=strf_from_depth[byte_depth]) scale = self.interface.value('WFMO:YMU?;') offset = self.interface.value('WFMO:YOF?;') curve = (curve - offset)*scale return curve def get_timebase(self): return np.linspace(0, self.record_length/self.record_rate, self.record_length) def get_fastaq_curve(self, channel=1): channel_string = "CH{:d}".format(channel) self.interface.write("DAT:SOU {:s};".format(channel_string)) self.source_channel = 1 self.encoding = "SRP" # Unsigned ints self.byte_depth = 8 self.data_start = 1 self.data_stop = self.record_length curve = self.interface.query_binary_values("CURVe?;", datatype='Q').reshape((1000,252)) return curve def get_math_curve(self, channel=1): pass class DPO72004C(_TekDPscope): fast_frame = StringCommand(get_string="HORizontal:FASTframe:STATE?;", set_string="HORizontal:FASTframe:STATE {:s};", value_map = {True: '1', False: '0'}) num_fast_frames = IntCommand(get_string="HOR:FAST:COUN?;", set_string="HOR:FAST:COUN {:d};") def get_curve(self, channel=1, byte_depth=2): channel_string = "CH{:d}".format(channel) self.interface.write("DAT:SOU {:s};".format(channel_string)) #self.source_channel = 1 self.encoding = "SRI" # Signed ints record_length = self.record_length self.data_start = 1 self.data_stop = record_length self.byte_depth = byte_depth strf_from_depth = {1: 'b', 2: 'h', 4: 'l', 8: 'q'} curve = self.interface.query_binary_values("CURVe?;", datatype=strf_from_depth[byte_depth]) scale = self.interface.value('WFMO:YMU?;') 
offset = self.interface.value('WFMO:YOF?;') curve = (curve - offset)*scale if self.fast_frame: curve.resize((self.num_fast_frames, record_length)) return curve def __init__(self, resource_name, *args, **kwargs): resource_name += "::4000::SOCKET" #user guide recommends HiSLIP protocol super(DPO72004C, self).__init__(resource_name, *args, **kwargs) self.name = "Tektronix DPO72004C Oscilloscope" class DPO2024(_TekDPscope): def __init__(self, resource_name, *args, **kwargs): super(DPO2024, self).__init__(resource_name, *args, **kwargs) self.name = "Tektronix DPO2024 Oscilloscope" def connect(self, resource_name=None, interface_type=None): if resource_name is not None: self.resource_name = resource_name super(DPO2024, self).connect(resource_name=self.resource_name, interface_type=interface_type) self.interface._resource.read_termination = u"\n" self.interface._resource.write_termination = u"\n" class DPO2014(_TekDPscope): def __init__(self, resource_name, *args, **kwargs): super(DPO2014, self).__init__(resource_name, *args, **kwargs) self.name = "Tektronix DPO2014 Oscilloscope" def connect(self, resource_name=None, interface_type=None): if resource_name is not None: self.resource_name = resource_name super(DPO2014, self).connect(resource_name=self.resource_name, interface_type=interface_type) self.interface._resource.read_termination = u"\n" self.interface._resource.write_termination = u"\n" class RSA3308A(SCPIInstrument): """Tektronix RSA3308A SA""" instrument_type = "Spectrum Analyzer" frequency_center = FloatCommand(scpi_string=":FREQuency:CENTer") frequency_span = FloatCommand(scpi_string=":FREQuency:SPAN") frequency_start = FloatCommand(scpi_string=":FREQuency:STARt") frequency_stop = FloatCommand(scpi_string=":FREQuency:STOP") num_sweep_points = FloatCommand(scpi_string=":SWEep:POINTs") resolution_bandwidth = FloatCommand(scpi_string=":BANDwidth") sweep_time = FloatCommand(scpi_string=":SWEep:TIME") averaging_count = IntCommand(scpi_string=':AVER:COUN') 
marker1_amplitude = FloatCommand(scpi_string=':CALC:MARK1:Y') marker1_position = FloatCommand(scpi_string=':CALC:MARK1:X') mode = StringCommand(scpi_string=":INSTrument", allowed_values=["SA", "BASIC", "PULSE", "PNOISE"]) # phase noise application commands pn_offset_start = FloatCommand(scpi_string=":LPLot:FREQuency:OFFSet:STARt") pn_offset_stop = FloatCommand(scpi_string=":LPLot:FREQuency:OFFSet:STOP") pn_carrier_freq = FloatCommand(scpi_string=":FREQuency:CARRier") def __init__(self, resource_name=None, *args, **kwargs): super(RSA3308A, self).__init__(resource_name, *args, **kwargs) def connect(self, resource_name=None, interface_type=None): if resource_name is not None: self.resource_name = resource_name #If we only have an IP address then tack on the raw socket port to the VISA resource string if is_valid_ipv4(self.resource_name): self.resource_name += "::5025::SOCKET" super(RSA3308A, self).connect(resource_name=self.resource_name, interface_type=interface_type) self.interface._resource.read_termination = u"\n" self.interface._resource.write_termination = u"\n" self.interface._resource.timeout = 3000 #seem to have trouble timing out on first query sometimes def get_axis(self): return np.linspace(self.frequency_start, self.frequency_stop, self.num_sweep_points) def get_trace(self, num=1): self.interface.write(':FORM:DATA REAL,32') return self.interface.query_binary_values(":TRACE:DATA? TRACE{:d}".format(num), datatype="f", is_big_endian=True) def get_pn_trace(self, num=3): # num = 3 is raw data # num = 4 is smoothed data # returns a tuple of (freqs, dBc/Hz) self.interface.write(":FORM:DATA ASCII") response = self.interface.query(":FETCH:LPLot{:d}?".format(num)) xypts = np.array([float(x) for x in response.split(',')]) return xypts[::2], xypts[1::2] def restart_sweep(self): """ Aborts current sweep and restarts. 
""" self.interface.write(":INITiate:RESTart") def peak_search(self, marker=1): self.interface.write(':CALC:MARK{:d}:MAX'.format(marker)) def marker_to_center(self, marker=1): self.interface.write(':CALC:MARK{:d}:CENT'.format(marker)) def clear_averaging(self): self.interface.write(':AVER:CLE') ```
[ { "content": "Produce an exact reconstruction of the code:\n```python\nimport time,random,functools\nimport numpy as np\n\n\nfrom PyQt4 import QtCore, QtGui\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\n\n\nclass utilitiesClass():\n\t\"\"\"\n\tThis class contains methods that simplify setting up and r...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nimport time,random,functools\nimport numpy as np\n\n\nfrom PyQt4 import QtCore, QtGui\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\n\n\nclass utilitiesClass():\n\t\"\"\"\n\tThis class contains methods that simplify ...
```python import time,random,functools import numpy as np from PyQt4 import QtCore, QtGui import pyqtgraph as pg import pyqtgraph.opengl as gl class utilitiesClass(): """ This class contains methods that simplify setting up and running an experiment. """ timers=[] viewBoxes=[] plots3D=[] plots2D=[] axisItems=[] total_plot_areas=0 def __init__(self): pass def updateViews(self,plot): for a in plot.viewBoxes: a.setGeometry(plot.getViewBox().sceneBoundingRect()) a.linkedViewChanged(plot.plotItem.vb, a.XAxis) def random_color(self): c=QtGui.QColor(random.randint(20,255),random.randint(20,255),random.randint(20,255)) if np.average(c.getRgb())<150: c=self.random_color() return c def add2DPlot(self,plot_area): plot=pg.PlotWidget() plot.setMinimumHeight(250) plot_area.addWidget(plot) plot.viewBoxes=[] self.plots2D.append(plot) return plot def add3DPlot(self,plot_area): plot3d = gl.GLViewWidget() #gx = gl.GLGridItem();gx.rotate(90, 0, 1, 0);gx.translate(-10, 0, 0);self.plot.addItem(gx) #gy = gl.GLGridItem();gy.rotate(90, 1, 0, 0);gy.translate(0, -10, 0);self.plot.addItem(gy) gz = gl.GLGridItem();#gz.translate(0, 0, -10); plot3d.addItem(gz); plot3d.opts['distance'] = 40 plot3d.opts['elevation'] = 5 plot3d.opts['azimuth'] = 20 plot3d.setMinimumHeight(250) plot_area.addWidget(plot3d) self.plots3D.append(plot3d) plot3d.plotLines3D=[] return plot3d def addCurve(self,plot,name='',col=(255,255,255),axis='left'): #if(len(name)):curve = plot.plot(name=name) #else:curve = plot.plot() if(len(name)):curve = pg.PlotCurveItem(name=name) else:curve = pg.PlotCurveItem() plot.addItem(curve) curve.setPen(color=col, width=1) return curve def rebuildLegend(self,plot): return plot.addLegend(offset=(-10,30)) def addAxis(self,plot,**args): p3 = pg.ViewBox() ax3 = pg.AxisItem('right') plot.plotItem.layout.addItem(ax3, 2, 3+len(self.axisItems)) plot.plotItem.scene().addItem(p3) ax3.linkToView(p3) p3.setXLink(plot.plotItem) ax3.setZValue(-10000) if args.get('label',False): 
ax3.setLabel(args.get('label',False), color=args.get('color','#ffffff')) plot.viewBoxes.append(p3) p3.setGeometry(plot.plotItem.vb.sceneBoundingRect()) p3.linkedViewChanged(plot.plotItem.vb, p3.XAxis) ## Handle view resizing Callback = functools.partial(self.updateViews,plot) plot.getViewBox().sigStateChanged.connect(Callback) self.axisItems.append(ax3) return p3 def loopTask(self,interval,func,*args): timer = QtCore.QTimer() timerCallback = functools.partial(func,*args) timer.timeout.connect(timerCallback) timer.start(interval) self.timers.append(timer) return timer def delayedTask(self,interval,func,*args): timer = QtCore.QTimer() timerCallback = functools.partial(func,*args) timer.singleShot(interval,timerCallback) self.timers.append(timer) def displayDialog(self,txt=''): QtGui.QMessageBox.about(self, 'Message', txt) ```
[ { "content": "Here is the source code:\n```python\nimport requests\nimport pytest\nimport re\nfrom _pytest.runner import runtestprotocol\nfrom http.client import RemoteDisconnected\nfrom support.device_stats_db import DeviceStatsDB\nfrom support.test_rerun import should_rerun_test\nfrom tests import test_suite_...
[ { "content": "Here is the source code:\n<|memory_start|>```python\nimport requests\nimport pytest\nimport re\nfrom _pytest.runner import runtestprotocol\nfrom http.client import RemoteDisconnected\nfrom support.device_stats_db import DeviceStatsDB\nfrom support.test_rerun import should_rerun_test\nfrom tests im...
```python import requests import pytest import re from _pytest.runner import runtestprotocol from http.client import RemoteDisconnected from support.device_stats_db import DeviceStatsDB from support.test_rerun import should_rerun_test from tests import test_suite_data, appium_container from datetime import datetime from os import environ from io import BytesIO from sauceclient import SauceClient, SauceException from support.api.network_api import NetworkApi from support.github_report import GithubHtmlReport from support.testrail_report import TestrailReport from tests.users import transaction_senders import tests sauce_username = environ.get('SAUCE_USERNAME') sauce_access_key = environ.get('SAUCE_ACCESS_KEY') github_token = environ.get('GIT_HUB_TOKEN') sauce = SauceClient(sauce_username, sauce_access_key) github_report = GithubHtmlReport() testrail_report = TestrailReport() def pytest_addoption(parser): parser.addoption("--build", action="store", default=datetime.now().strftime('%Y-%m-%d-%H-%M'), help="Specify build name") parser.addoption('--apk', action='store', default=None, help='Url or local path to apk') parser.addoption('--env', action='store', default='sauce', help='Specify environment: local/sauce/api') parser.addoption('--platform_version', action='store', default='8.0', help='Android device platform version') parser.addoption('--log_steps', action='store', default=False, help='Display each test step in terminal as plain text: True/False') parser.addoption('--pr_number', action='store', default=None, help='Pull Request number') parser.addoption('--testrail_report', action='store', default=False, help='boolean; For creating testrail report per run') parser.addoption('--network', action='store', default='ropsten', help='string; ropsten or rinkeby') parser.addoption('--rerun_count', action='store', default=0, help='How many times tests should be re-run if failed') parser.addoption("--run_testrail_ids", action="store", metavar="NAME", default=None, help="only 
run tests matching the environment NAME.") parser.addoption("--apk_upgrade", action="store", metavar="NAME", default=None, help='Url or local path to apk for upgrade') # chat bot parser.addoption('--messages_number', action='store', default=20, help='Messages number') parser.addoption('--public_keys', action='store', default='', help='List of public keys for one-to-one chats') parser.addoption('--running_time', action='store', default=600, help='Running time in seconds') parser.addoption('--chat_name', action='store', default='test_chat', help='Public chat name') parser.addoption('--device_number', action='store', default=2, help='Public chat name') # running tests using appium docker instance parser.addoption('--docker', action='store', default=False, help='Are you using the appium docker container to run the tests?') parser.addoption('--docker_shared_volume', action='store', default=None, help='Path to a directory with .apk that will be shared with docker instance. Test reports will be also saved there') parser.addoption('--device_ip', action='store', default=None, help='Android device IP address used for battery tests') parser.addoption('--bugreport', action='store', default=False, help='Should generate bugreport for each test?') parser.addoption('--stats_db_host', action='store', default=None, help='Host address for device stats database') parser.addoption('--stats_db_port', action='store', default=8086, help='Port for device stats db') parser.addoption('--stats_db_username', action='store', default=None, help='Username for device stats db') parser.addoption('--stats_db_password', action='store', default=None, help='Password for device stats db') parser.addoption('--stats_db_database', action='store', default='example9', help='Database name for device stats db') def is_master(config): return not hasattr(config, 'workerinput') def is_uploaded(): stored_files = sauce.storage.get_stored_files() for i in range(len(stored_files['files'])): if 
stored_files['files'][i]['name'] == test_suite_data.apk_name: return True def pytest_configure(config): tests.pytest_config_global = vars(config.option) config.addinivalue_line("markers", "testrail_id(name): empty") if config.getoption('log_steps'): import logging logging.basicConfig(level=logging.INFO) if config.getoption('env') != 'api': test_suite_data.apk_name = ([i for i in [i for i in config.getoption('apk').split('/') if '.apk' in i]])[0] if is_master(config): pr_number = config.getoption('pr_number') if config.getoption('testrail_report'): if pr_number: run_number = len(testrail_report.get_runs(pr_number)) + 1 run_name = 'PR-%s run #%s' % (pr_number, run_number) else: run_name = test_suite_data.apk_name testrail_report.add_run(run_name) if pr_number: from github import Github repo = Github(github_token).get_user('status-im').get_repo('status-react') pull = repo.get_pull(int(pr_number)) pull.get_commits()[0].create_status(state='pending', context='Mobile e2e tests', description='e2e tests are running') if config.getoption('env') == 'sauce': if not is_uploaded(): if 'http' in config.getoption('apk'): response = requests.get(config.getoption('apk'), stream=True) response.raise_for_status() file = BytesIO(response.content) del response requests.post('http://saucelabs.com/rest/v1/storage/' + sauce_username + '/' + test_suite_data.apk_name + '?overwrite=true', auth=(sauce_username, sauce_access_key), data=file, headers={'Content-Type': 'application/octet-stream'}) else: sauce.storage.upload_file(config.getoption('apk')) def pytest_unconfigure(config): if is_master(config): if config.getoption('testrail_report'): testrail_report.add_results() if config.getoption('pr_number'): from github import Github repo = Github(github_token).get_user('status-im').get_repo('status-react') pull = repo.get_pull(int(config.getoption('pr_number'))) comment = pull.create_issue_comment(github_report.build_html_report(testrail_report.run_id)) if not 
testrail_report.is_run_successful(): pull.get_commits()[0].create_status(state='failure', context='Mobile e2e tests', description='Failure - e2e tests are failed', target_url=comment.html_url) else: pull.get_commits()[0].create_status(state='success', context='Mobile e2e tests', description='Success - e2e tests are passed', target_url=comment.html_url) def should_save_device_stats(config): db_args = [config.getoption(option) for option in ('stats_db_host', 'stats_db_port', 'stats_db_username', 'stats_db_password', 'stats_db_database')] return all(db_args) @pytest.mark.hookwrapper def pytest_runtest_makereport(item, call): outcome = yield report = outcome.get_result() if report.when == 'call': is_sauce_env = item.config.getoption('env') == 'sauce' current_test = test_suite_data.current_test if report.failed: error = report.longreprtext exception = re.findall('E.*Message:|E.*Error:|E.*Failed:', error) if exception: error = error.replace(re.findall('E.*Message:|E.*Error:|E.*Failed:', report.longreprtext)[0], '') current_test.testruns[-1].error = error if is_sauce_env: update_sauce_jobs(current_test.name, current_test.testruns[-1].jobs, report.passed) if item.config.getoption('docker'): device_stats = appium_container.get_device_stats() if item.config.getoption('bugreport'): appium_container.generate_bugreport(item.name) build_name = item.config.getoption('apk') # Find type of tests that are run on the device if 'battery_consumption' in item.keywords._markers: test_group = 'battery_consumption' else: test_group = None if should_save_device_stats(item.config): device_stats_db = DeviceStatsDB( item.config.getoption('stats_db_host'), item.config.getoption('stats_db_port'), item.config.getoption('stats_db_username'), item.config.getoption('stats_db_password'), item.config.getoption('stats_db_database'), ) device_stats_db.save_stats(build_name, item.name, test_group, not report.failed, device_stats) def update_sauce_jobs(test_name, job_ids, passed): for job_id in 
job_ids.keys(): try: sauce.jobs.update_job(job_id, name=test_name, passed=passed) except (RemoteDisconnected, SauceException): pass def get_testrail_case_id(item): testrail_id = item.get_closest_marker('testrail_id') if testrail_id: return testrail_id.args[0] def pytest_runtest_setup(item): try: testrail_id = [mark.args[0] for mark in item.iter_markers(name='testrail_id')][0] except IndexError: pass run_testrail_ids = item.config.getoption("run_testrail_ids") if run_testrail_ids: if str(testrail_id) not in list(run_testrail_ids.split(",")): pytest.skip("test requires testrail case id %s" % testrail_id) test_suite_data.set_current_test(item.name, testrail_case_id=get_testrail_case_id(item)) test_suite_data.current_test.create_new_testrun() def pytest_runtest_protocol(item, nextitem): rerun_count = int(item.config.getoption('rerun_count')) for i in range(rerun_count): reports = runtestprotocol(item, nextitem=nextitem) for report in reports: if report.failed and should_rerun_test(report.longreprtext): break # rerun else: return True # no need to rerun @pytest.fixture(scope="session", autouse=False) def faucet_for_senders(): network_api = NetworkApi() for user in transaction_senders.values(): network_api.faucet(address=user['address']) @pytest.fixture def messages_number(request): return int(request.config.getoption('messages_number')) @pytest.fixture def message_wait_time(request): return int(request.config.getoption('message_wait_time')) @pytest.fixture def participants_number(request): return int(request.config.getoption('participants_number')) @pytest.fixture def chat_name(request): return request.config.getoption('chat_name') @pytest.fixture def user_public_key(request): return request.config.getoption('user_public_key') ```
[ { "content": "Here is the snippet:\n```python\n# Copyright (c) 2013 Johan Ceuppens.\n# All rights reserved.\n\n# Redistribution and use in source and binary forms are permitted\n# provided that the above copyright notice and this paragraph are\n# duplicated in all such forms and that any documentation,\n# adver...
[ { "content": "Here is the snippet:\n<|memory_start|>```python\n# Copyright (c) 2013 Johan Ceuppens.\n# All rights reserved.\n\n# Redistribution and use in source and binary forms are permitted\n# provided that the above copyright notice and this paragraph are\n# duplicated in all such forms and that any documen...
```python # Copyright (c) 2013 Johan Ceuppens. # All rights reserved. # Redistribution and use in source and binary forms are permitted # provided that the above copyright notice and this paragraph are # duplicated in all such forms and that any documentation, # advertising materials, and other materials related to such # distribution and use acknowledge that the software was developed # by the Johan Ceuppens. The name of the # Johan Ceuppens may not be used to endorse or promote products derived # from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. # Copyright (C) Johan Ceuppens 2010 # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import pygame from pygame.locals import * from gameobject import * from bullet import * from stateimagelibrary import * import random from time import * from math import * from random import * from rng import * class BomberToad(Gameobject): "Dude on Toad throwing Bombs" def __init__(self,xx,yy): Gameobject.__init__(self, xx, yy) self.w = 100 self.h = 100 self.hitpoints = 2 self.yy = yy self.stimlib = Stateimagelibrary() image = pygame.image.load('./pics/bomber-left-1.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) image = pygame.image.load('./pics/bomber-left-2.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) image = pygame.image.load('./pics/bomber-left-3.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) image = pygame.image.load('./pics/bomber-left-4.bmp').convert() image.set_colorkey((0,0,0)) self.stimlib.addpicture(image) self.counter = 0 def draw(self, screen, room): if randint(0,100) != 100 and self.counter == 0: self.counter = 0 self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, 0) else: self.counter += 1 self.stimlib.drawstatic(screen, self.x-40+room.relativex,self.y+room.relativey, self.counter) if self.counter >= 3: self.counter = 0 room.gameobjects.append(Bullet(self.x+room.relativex,self.y+room.relativey, "left")) def update(self,room,player): 1 def fight(self,room,player,keydown = -1): 1 ```
[ { "content": "Provide an exact copy of the source code:\n```python\nimport os\nimport time\nimport json\nimport string\nfrom collections import defaultdict, Counter\nfrom random import random\n\nimport tweepy\n\nclass TwitterAPI:\n \"\"\"\n Class for accessing the Twitter API.\n\n Requires API credenti...
[ { "content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport os\nimport time\nimport json\nimport string\nfrom collections import defaultdict, Counter\nfrom random import random\n\nimport tweepy\n\nclass TwitterAPI:\n \"\"\"\n Class for accessing the Twitter API.\n\n Requi...
```python import os import time import json import string from collections import defaultdict, Counter from random import random import tweepy class TwitterAPI: """ Class for accessing the Twitter API. Requires API credentials to be available in environment variables. These will be set appropriately if the bot was created with init.sh included with the heroku-twitterbot-starter """ def __init__(self): consumer_key = "ZyyYUZVcGfbMBa644Ey77Tu5b" consumer_secret = "FgL9UAXDin6YQwR1ILqMdE8aCLG9wPkhKDm8wJibyNnWLem2kc" auth = tweepy.OAuthHandler(consumer_key, consumer_secret) access_token = "3299819928-mYYqdXnQmZTURU9iXaalXDq7BGnCESNfe7MGUJE" access_token_secret = "1pkxjxkpIPQCnAM0zEttaCHKezdlW5Co3x5B2KY1j40qI" auth.set_access_token(access_token, access_token_secret) self.api = tweepy.API(auth) def tweet(self, message): """Send a tweet""" self.api.update_status(status=message) def train_char_lm(fname, order=4): # data = file(fname).read() data = fname lm = defaultdict(Counter) pad = "~" * order data = pad + data for i in xrange(len(data)-order): history, char = data[i:i+order], data[i+order] lm[history][char]+=1 def normalize(counter): s = float(sum(counter.values())) return [(c,cnt/s) for c,cnt in counter.iteritems()] outlm = {hist:normalize(chars) for hist, chars in lm.iteritems()} return outlm def generate_letter(lm, history, order): history = history[-order:] dist = lm[history] x = random() for c,v in dist: x = x - v if x <= 0: return c def generate_text(lm, order, nletters=5000): history = "~" * order out = [] for i in xrange(nletters): c = generate_letter(lm, history, order) history = history[-order:] + c out.append(c) return "".join(out) # In[148]: def fix_unmatched(l): unmatched_locs = [] unmatched_locs_rev = [] def error(c, column_number): # print 'Error: unmatched', c, 'column', column_number if c in [')', ']', '}']: unmatched_locs.append(column_number) else: unmatched_locs_rev.append(column_number) def check(stack, wanted, c, column_number): if stack: if 
stack[-1] != wanted: error(c, column_number) else: stack.pop() else: error(c, column_number) def check_parentheses(line): stack = list() column_number = 0 for c in line: if c == '(' or c == '[' or c == '{': stack.append(c) elif c == ')': check(stack, '(', ')', column_number) elif c == ']': check(stack, '[', ']', column_number) elif c == '}': check(stack, '{', '}', column_number) column_number += 1 def check_parentheses_rev(line): stack = list() column_number = 0 for c in line: column_number += 1 if c == ')' or c == ']' or c == '}': stack.append(c) elif c == '(': check(stack, ')', '(', column_number) elif c == '[': check(stack, ']', '[', column_number) elif c == '{': check(stack, '}', '{', column_number) check_parentheses(l) lchars = list(l) newTitle = ''.join([i for j, i in enumerate(lchars) if j not in unmatched_locs]) check_parentheses_rev(newTitle[::-1]) real_unmatched_rev = map(lambda i: len(newTitle)-i, unmatched_locs_rev) titChars = list(newTitle) newTitle = ''.join([i for j, i in enumerate(titChars) if j not in real_unmatched_rev]) numDoubleQuotes = newTitle.count('\"') if numDoubleQuotes % 2: newTitle = string.replace(newTitle, '\"', '', 1) numSingleQuotes = newTitle.count("\'") if numSingleQuotes % 2: newTitle = string.replace(newTitle, "\'", "", 1) return newTitle def main(): generatedTexts = map(lambda lm: generate_text(lm, 7), lms) entry_candidates = map(lambda x: x.split('\n'), generatedTexts) def remove_plagiarized(i): plagiarized = set(entry_candidates[i]) & set(data[i]) keepers = map(fix_unmatched, list(set(entry_candidates[i]) - plagiarized)) return keepers entries = map(remove_plagiarized, range(len(data))) invented_art = zip(*entries) def unpack(tup): t, a, m = tup outstr = "%s\n%s\n%s" % (t, a, m) return outstr output = filter(lambda x: len(x) <= 140, map(unpack, invented_art)) return output fileObj = open('artworks.json', 'r') art = json.load(fileObj)[:75000] fileObj.close() print "Artwork list loaded..." 
titles = map(lambda d: d['title'], art) artists = map(lambda d: d['artist'], art) media = map(lambda d: d['medium'], art) print "Got titles, artists, media..." # dimensions = map(lambda d: d['dimensions'], art) data = [titles, artists, media] lms = map(lambda l: train_char_lm('\n'.join(l), order=7), data) print "Got language models..." if __name__ == "__main__": twitter = TwitterAPI() while True: toTweet = main() print "Got toTweet list..." while toTweet: curTweet = toTweet.pop() print "Posting tweet..." twitter.tweet(curTweet) print "...tweet posted!" time.sleep(120) ```
[ { "content": "Here is the script:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"The gzip file path specification resolver helper implementation.\"\"\"\n\n# This is necessary to prevent a circular import.\nimport dfvfs.file_io.gzip_file_io\nimport dfvfs.vfs.gzip_file_system\n\nfrom dfvfs.lib import definitions\nfro...
[ { "content": "Here is the script:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"The gzip file path specification resolver helper implementation.\"\"\"\n\n# This is necessary to prevent a circular import.\nimport dfvfs.file_io.gzip_file_io\nimport dfvfs.vfs.gzip_file_system\n\nfrom dfvfs.lib import ...
```python # -*- coding: utf-8 -*- """The gzip file path specification resolver helper implementation.""" # This is necessary to prevent a circular import. import dfvfs.file_io.gzip_file_io import dfvfs.vfs.gzip_file_system from dfvfs.lib import definitions from dfvfs.resolver import resolver from dfvfs.resolver import resolver_helper class GzipResolverHelper(resolver_helper.ResolverHelper): """Class that implements the gzip file resolver helper.""" TYPE_INDICATOR = definitions.TYPE_INDICATOR_GZIP def NewFileObject(self, resolver_context): """Creates a new file-like object. Args: resolver_context: the resolver context (instance of resolver.Context). Returns: The file-like object (instance of file_io.FileIO). """ return dfvfs.file_io.gzip_file_io.GzipFile(resolver_context) def NewFileSystem(self, resolver_context): """Creates a new file system object. Args: resolver_context: the resolver context (instance of resolver.Context). Returns: The file system object (instance of vfs.FileSystem). """ return dfvfs.vfs.gzip_file_system.GzipFileSystem(resolver_context) resolver.Resolver.RegisterHelper(GzipResolverHelper()) ```
[ { "content": "```python\nfrom typing import List\n\nimport typepy\nfrom mbstrdecoder import MultiByteStrDecoder\n\nfrom ....error import EmptyTableNameError\nfrom ._numpy import NumpyTableWriter\n\n\nclass PandasDataFrameWriter(NumpyTableWriter):\n \"\"\"\n A writer class for Pandas DataFrame format.\n\n ...
[ { "content": "<|memory_start|>```python\nfrom typing import List\n\nimport typepy\nfrom mbstrdecoder import MultiByteStrDecoder\n\nfrom ....error import EmptyTableNameError\nfrom ._numpy import NumpyTableWriter\n\n\nclass PandasDataFrameWriter(NumpyTableWriter):\n \"\"\"\n A writer class for Pandas DataFr...
```python from typing import List import typepy from mbstrdecoder import MultiByteStrDecoder from ....error import EmptyTableNameError from ._numpy import NumpyTableWriter class PandasDataFrameWriter(NumpyTableWriter): """ A writer class for Pandas DataFrame format. :Example: :ref:`example-pandas-dataframe-writer` .. py:attribute:: import_pandas_as :type: str :value: "pd" Specify ``pandas`` module import name of an output source code. .. py:attribute:: import_numpy_as :type: str :value: "np" Specify ``numpy`` module import name of an output source code. .. py:method:: write_table |write_table| with Pandas DataFrame format. The tabular data are written as a ``pandas.DataFrame`` class instance definition. :raises pytablewriter.EmptyTableNameError: If the |table_name| is empty. :Example: :ref:`example-pandas-dataframe-writer` .. note:: Specific values in the tabular data are converted when writing: - |None|: written as ``None`` - |inf|: written as ``numpy.inf`` - |nan|: written as ``numpy.nan`` - |datetime| instances determined by |is_datetime_instance_formatting| attribute: - |True|: written as `dateutil.parser <https://dateutil.readthedocs.io/en/stable/parser.html>`__ - |False|: written as |str| .. 
seealso:: :ref:`example-type-hint-python` """ FORMAT_NAME = "pandas" @property def format_name(self) -> str: return self.FORMAT_NAME def __init__(self, **kwargs) -> None: super().__init__(**kwargs) self.import_pandas_as = "pd" self.is_write_header = False def _get_opening_row_items(self) -> List[str]: return [f"{self.variable_name} = {self.import_pandas_as}.DataFrame(["] def _get_closing_row_items(self) -> List[str]: if typepy.is_not_empty_sequence(self.headers): return [ "], columns=[{}])".format( ", ".join( f'"{MultiByteStrDecoder(header).unicode_str}"' for header in self.headers ) ) ] return ["])"] def _verify_property(self) -> None: super()._verify_property() if typepy.is_null_string(self.table_name): raise EmptyTableNameError("table_name must be a string of one or more characters") ```
[ { "content": "Recreate the original code text:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport subprocess\nfrom os.path import exists\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\nfrom setuptools.command.build_py import build_py as _build_py\n\nfrom Cython.Distu...
[ { "content": "Recreate the original code text:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport subprocess\nfrom os.path import exists\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\nfrom setuptools.command.build_py import build_py as _build_py\n\nf...
```python #!/usr/bin/env python # -*- coding: utf-8 -*- import subprocess from os.path import exists from setuptools import setup from setuptools.extension import Extension from setuptools.command.build_py import build_py as _build_py from Cython.Distutils import build_ext class build_py(_build_py): if exists('lib/Makefile'): make_clean = subprocess.Popen(['make', 'distclean'], cwd='lib/') make_clean.wait() configure = subprocess.Popen(['./configure', '--with-pic'], cwd='lib/') configure.wait() make = subprocess.Popen(['make', '-j'], cwd='lib/') make.wait() setup( cmdclass={'build_py': build_py, 'build_ext': build_ext}, name='divsufsort', ext_modules=[ Extension( "suffixarray", sources=['src/suffix.pyx'], libraries=['lib/lib/.libs/libdivsufsort.a'], extra_objects=['lib/lib/.libs/libdivsufsort.a'], include_dirs=['lib/include'], language="c")]) ```
[ { "content": "Write the code verbatim:\n```python\n#%% Libraries\nimport numpy as np\nimport root_funcs as rf\n#%% predict_linear class (Module for Process_Node - Predict)\nclass predict_linear(object):\n \n __lookup_dict = {\n 'beta_init': {\n 'zeros': ('Cluster_Control.Cluster_IO.Predi...
[ { "content": "Write the code verbatim:\n<|memory_start|>```python\n#%% Libraries\nimport numpy as np\nimport root_funcs as rf\n#%% predict_linear class (Module for Process_Node - Predict)\nclass predict_linear(object):\n \n __lookup_dict = {\n 'beta_init': {\n 'zeros': ('Cluster_Control....
```python #%% Libraries import numpy as np import root_funcs as rf #%% predict_linear class (Module for Process_Node - Predict) class predict_linear(object): __lookup_dict = { 'beta_init': { 'zeros': ('Cluster_Control.Cluster_IO.Predict_Cluster.beta_init', 'beta_zeros') }, 'learn_rate': { 'partial_hessian': ('Cluster_Control.Cluster_IO.Predict_Cluster.Linear.learn_rate', 'partial_hessian'), 'manual': ('Cluster_Control.Cluster_IO.Predict_Cluster.Linear.learn_rate', 'manual') } } __class_type = 'predict_linear' def __init__(self, beta_init_type = 'zeros', learn_rate_type = 'partial_hessian'): self.__betas = np.empty((0, 0)) self.__subclasses = {} self.set_subclass('beta_init', beta_init_type) self.set_subclass('learn_rate', learn_rate_type) def get_type(self): return self.__class_type def set_subclass(self, subitem, subitem_type, *args, **kwargs): self.__subclasses[subitem] = (subitem_type, rf.upload_module(self.__lookup_dict[subitem][subitem_type])(*args, **kwargs)) def call_submethod(self, subitem, submethod, *args, **kwargs): return getattr(self.__subclasses[subitem][1], submethod)(*args, **kwargs) def get_subclass_type(self, subitem): return self.__subclasses.get(subitem, (None))[0] def init_betas(self, feature_count): self.__betas = self.call_submethod('beta_init', 'initialize', feature_count) def predict(self, input_data): return np.dot(input_data, self.__betas) ```
[ { "content": "```python\n#!/usr/bin/env python\nimport fileinput\nimport sys\n\n///<summary>\n///Provides a bunch of operations to format and manage hard disks.\n///\n///One cool thing that this class does is allow you to generate a fstab file\n///based on disk formatting operations conducted earlier using this...
[ { "content": "<|memory_start|>```python\n#!/usr/bin/env python\nimport fileinput\nimport sys\n\n///<summary>\n///Provides a bunch of operations to format and manage hard disks.\n///\n///One cool thing that this class does is allow you to generate a fstab file\n///based on disk formatting operations conducted ea...
```python #!/usr/bin/env python import fileinput import sys ///<summary> ///Provides a bunch of operations to format and manage hard disks. /// ///One cool thing that this class does is allow you to generate a fstab file ///based on disk formatting operations conducted earlier using this class. This ///is helpful when installing a new Gentoo installation. ///</summary> class BuildKernel: def compileNewKernel(self, kernelType, initSettings): f = fopen('/etc/superGentoo/kernel', 'w') f.write(kernelType + "," + initSettings) f.close() os.system("emerge " + kernelType) os.system("mv /usr/src/.config /usr/src/linux/.config") os.system("touch /usr/src/linux/.config") os.system("cd /usr/src/linux") os.system("make") os.system("make modules_install") os.system("cp arch/x86_64/boot/bzImage /boot/kernel-`find /usr/src -name linux-3* | awk -Flinux- '{print \$NF }'`") def upgradeKernel(self): kernelData = open('/etc/superGentoo/kernel').read(1000).split(",") os.system("emerge --update ". kernelData[0]) os.system() //--------------------------------------------------------------------------// // MAIN FUNCTION //--------------------------------------------------------------------------// if __name__ == '__main__': bk = BuildKernel() if sys.argv[1] == "upgrade": bk.upgradeKernel() elif sys.argv[1] == "newKernel": bk.compileNewKernel(sys.argv[2], sys.argv[3]) ```
[ { "content": "Recreate the original code text:\n```python\nfrom PyQt5.QtWidgets import QWidget, QSplitter, QVBoxLayout, QFrame, QFileDialog, QScrollArea, QMenuBar, QAction, QToolBar\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtGui import QIcon\r\nfrom JamSpace.Views.LaneSpaceView import LaneSpaceView\r\nfrom ...
[ { "content": "Recreate the original code text:\n<|memory_start|>```python\nfrom PyQt5.QtWidgets import QWidget, QSplitter, QVBoxLayout, QFrame, QFileDialog, QScrollArea, QMenuBar, QAction, QToolBar\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtGui import QIcon\r\nfrom JamSpace.Views.LaneSpaceView import LaneSp...
```python from PyQt5.QtWidgets import QWidget, QSplitter, QVBoxLayout, QFrame, QFileDialog, QScrollArea, QMenuBar, QAction, QToolBar from PyQt5.QtCore import Qt from PyQt5.QtGui import QIcon from JamSpace.Views.LaneSpaceView import LaneSpaceView from JamSpace.Views.ControlBar import ControlBar class MainView(QWidget): def __init__(self): super().__init__() # declare member variables: self.laneSpace = LaneSpaceView(parent=self) self.controlBar = ControlBar(parent=self) self.menuBar = QMenuBar(self) self.toolBar = QToolBar(self) self.toolBar.show() self.laneScrollArea = QScrollArea() self.laneScrollArea.setWidgetResizable(True) self.WIDTH = 900 self.HEIGHT = 700 # Initialise the UI: self.initUI() def initUI(self): self.setGeometry(20, 30, self.WIDTH, self.HEIGHT) self.setWindowTitle('JamSpace') # configure the menu bar: # create menus: fileMenu = self.menuBar.addMenu('&File') editMenu = self.menuBar.addMenu('&Edit') # create actions: self.exitAction = QAction('Exit', self) self.exitAction.setStatusTip('Close the application') self.addLaneAction = QAction(QIcon('addLaneIcon.png'), 'Add Lane', self) self.playAction = QAction(QIcon('playIcon.png'), 'Play', self) self.stopAction = QAction(QIcon('stopIcon.ico'), 'Stop', self) self.addLaneAction.setStatusTip('Add a new lane') self.playAction.setStatusTip('Start playback') self.stopAction.setStatusTip('Stop playback') # add the actions to the menus/toolbar: fileMenu.addAction(self.exitAction) self.toolBar.addAction(self.playAction) self.toolBar.addAction(self.stopAction) self.toolBar.addAction(self.addLaneAction) self.laneScrollArea.setWidget(self.laneSpace) # Instantiate UI components: laneEditSpace = QFrame(self) laneEditSpace.setFrameShape(QFrame.StyledPanel) clipEditSpace = QFrame(self) clipEditSpace.setFrameShape(QFrame.StyledPanel) # Apply layout: vSplitter = QSplitter(Qt.Vertical) hSplitter = QSplitter(Qt.Horizontal) hSplitter.addWidget(laneEditSpace) hSplitter.addWidget(clipEditSpace) 
vSplitter.addWidget(self.controlBar) vSplitter.addWidget(self.laneScrollArea) vSplitter.addWidget(hSplitter) vbox = QVBoxLayout(self) vbox.addWidget(vSplitter) #vbox.setAlignment(Qt.AlignTop) self.setLayout(vbox) self.show() def showDirectoryDialog(self): dirSelectionDialog = QFileDialog(self) projectDir = QFileDialog.getExistingDirectory(dirSelectionDialog, 'Select Project Folder') return projectDir ```
[ { "content": "Return the code exactly, with no changes:\n```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDon't edit this file manually.\nUpdate it from QGIS console:\n\nfrom processing.tools.translation import updateTranslations\nupdateTranslations()\n\"\"\"\n\nfrom PyQt4.QtCore import QCoreApplication\n\ndef tra...
[ { "content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDon't edit this file manually.\nUpdate it from QGIS console:\n\nfrom processing.tools.translation import updateTranslations\nupdateTranslations()\n\"\"\"\n\nfrom PyQt4.QtCore import QCoreApplic...
```python # -*- coding: utf-8 -*- """ Don't edit this file manually. Update it from QGIS console: from processing.tools.translation import updateTranslations updateTranslations() """ from PyQt4.QtCore import QCoreApplication def translationShadow(): """QGISAlgorithmProvider""" QCoreApplication.translate("SumLines", "Sum line lengths") QCoreApplication.translate("PointsInPolygon", "Count points in polygon") QCoreApplication.translate("PointsInPolygonWeighted", "Count points in polygon(weighted)") QCoreApplication.translate("PointsInPolygonUnique", "Count unique points in polygon") QCoreApplication.translate("BasicStatisticsStrings", "Basic statistics for text fields") QCoreApplication.translate("BasicStatisticsNumbers", "Basic statistics for numeric fields") QCoreApplication.translate("NearestNeighbourAnalysis", "Nearest neighbour analysis") QCoreApplication.translate("MeanCoords", "Mean coordinate(s)") QCoreApplication.translate("LinesIntersection", "Line intersections") QCoreApplication.translate("UniqueValues", "List unique values") QCoreApplication.translate("PointDistance", "Distance matrix") QCoreApplication.translate("ReprojectLayer", "Reproject layer") QCoreApplication.translate("ExportGeometryInfo", "Export/Add geometry columns") QCoreApplication.translate("Centroids", "Polygon centroids") QCoreApplication.translate("Delaunay", "Delaunay triangulation") QCoreApplication.translate("VoronoiPolygons", "Voronoi polygons") QCoreApplication.translate("SimplifyGeometries", "Simplify geometries") QCoreApplication.translate("DensifyGeometries", "Densify geometries") QCoreApplication.translate("DensifyGeometriesInterval", "Densify geometries given an interval") QCoreApplication.translate("MultipartToSingleparts", "Multipart to singleparts") QCoreApplication.translate("SinglePartsToMultiparts", "Singleparts to multipart") QCoreApplication.translate("PolygonsToLines", "Polygons to lines") QCoreApplication.translate("LinesToPolygons", "Lines to polygons") 
# NOTE(review): auto-generated translation-extraction stub. These calls exist only so
# pylupdate/Qt Linguist picks up the algorithm display names; the return values are
# discarded. Presumably regenerated by a script -- do not edit the literals by hand
# (TODO confirm the generator before touching this file).
# Section: QGIS-native provider, script algorithms, and modeler-only algorithms.
QCoreApplication.translate("ExtractNodes", "Extract nodes") QCoreApplication.translate("Eliminate", "Eliminate sliver polygons") QCoreApplication.translate("ConvexHull", "Convex hull") QCoreApplication.translate("FixedDistanceBuffer", "Fixed distance buffer") QCoreApplication.translate("VariableDistanceBuffer", "Variable distance buffer") QCoreApplication.translate("Dissolve", "Dissolve") QCoreApplication.translate("Difference", "Difference") QCoreApplication.translate("Intersection", "Intersection") QCoreApplication.translate("Union", "Union") QCoreApplication.translate("Clip", "Clip") QCoreApplication.translate("ExtentFromLayer", "Polygon from layer extent") QCoreApplication.translate("RandomSelection", "Random selection") QCoreApplication.translate("RandomSelectionWithinSubsets", "Random selection within subsets") QCoreApplication.translate("SelectByLocation", "Select by location") QCoreApplication.translate("RandomExtract", "Random extract") QCoreApplication.translate("DeleteHoles", "Delete holes") QCoreApplication.translate("RandomExtractWithinSubsets", "Random extract within subsets") QCoreApplication.translate("ExtractByLocation", "Extract by location") QCoreApplication.translate("SpatialJoin", "Join attributes by location") QCoreApplication.translate("RegularPoints", "Regular points") QCoreApplication.translate("SymmetricalDifference", "Symmetrical difference") QCoreApplication.translate("VectorSplit", "Split vector layer") QCoreApplication.translate("VectorGrid", "Vector grid") QCoreApplication.translate("DeleteColumn", "Delete column") QCoreApplication.translate("DeleteDuplicateGeometries", "Delete duplicate geometries") QCoreApplication.translate("TextToFloat", "Text to float") QCoreApplication.translate("ExtractByAttribute", "Extract by attribute") QCoreApplication.translate("SelectByAttribute", "Select by attribute") QCoreApplication.translate("Grid", "Create graticule") QCoreApplication.translate("Gridify", "Snap points to grid") 
QCoreApplication.translate("HubDistance", "Distance to nearest hub") QCoreApplication.translate("HubLines", "Hub lines") QCoreApplication.translate("Merge", "Merge vector layers") QCoreApplication.translate("GeometryConvert", "Convert geometry type") QCoreApplication.translate("AddTableField", "Add field to attributes table") QCoreApplication.translate("FieldsCalculator", "Field calculator") QCoreApplication.translate("SaveSelectedFeatures", "Save selected features") QCoreApplication.translate("JoinAttributes", "Join attributes table") QCoreApplication.translate("AutoincrementalField", "Add autoincremental field") QCoreApplication.translate("Explode", "Explode lines") QCoreApplication.translate("FieldsPyculator", "Advanced Python field calculator") QCoreApplication.translate("EquivalentNumField", "Create equivalent numerical field") QCoreApplication.translate("PointsLayerFromTable", "Points layer from table") QCoreApplication.translate("StatisticsByCategories", "Statistics by categories") QCoreApplication.translate("ConcaveHull", "Concave hull") QCoreApplication.translate("Polygonize", "Polygonize") QCoreApplication.translate("RasterLayerStatistics", "Raster layer statistics") QCoreApplication.translate("PointsDisplacement", "Points displacement") QCoreApplication.translate("ZonalStatistics", "Zonal Statistics") QCoreApplication.translate("PointsFromPolygons", "Generate points (pixel centroids) inside polygons") QCoreApplication.translate("PointsFromLines", "Generate points (pixel centroids) along line") QCoreApplication.translate("RandomPointsExtent", "Random points in extent") QCoreApplication.translate("RandomPointsLayer", "Random points in layer bounds") QCoreApplication.translate("RandomPointsPolygonsFixed", "Random points inside polygons (fixed)") QCoreApplication.translate("RandomPointsPolygonsVariable", "Random points inside polygons (variable)") QCoreApplication.translate("RandomPointsAlongLines", "Random points along line") 
QCoreApplication.translate("PointsToPaths", "Points to path") QCoreApplication.translate("PostGISExecuteSQL", "PostGIS execute SQL") QCoreApplication.translate("ImportIntoPostGIS", "Import into PostGIS") QCoreApplication.translate("SetVectorStyle", "Set style for vector layer") QCoreApplication.translate("SetRasterStyle", "Set style for raster layer") QCoreApplication.translate("SelectByExpression", "Select by expression") QCoreApplication.translate("HypsometricCurves", "Hypsometric curves") QCoreApplication.translate("SplitLinesWithLines", "Split lines with lines") QCoreApplication.translate("CreateConstantRaster", "Create constant raster layer") QCoreApplication.translate("FieldsMapper", "Refactor fields") QCoreApplication.translate("SelectByAttributeSum", "Select by attribute sum") QCoreApplication.translate("Datasources2Vrt", "Build virtual vector") QCoreApplication.translate("CheckValidity", "Check validity") QCoreApplication.translate("VectorLayerHistogram", "Vector layer histogram") QCoreApplication.translate("RasterLayerHistogram", "Raster layer histogram") QCoreApplication.translate("VectorLayerScatterplot", "Vector layer scatterplot") QCoreApplication.translate("MeanAndStdDevPlot", "Mean and standard deviation plot") QCoreApplication.translate("BarPlot", "Bar plot") QCoreApplication.translate("PolarPlot", "Polar plot") QCoreApplication.translate("ScriptAlgorithm", "Number of unique values in classes") QCoreApplication.translate("ScriptAlgorithm", "Create points along lines") QCoreApplication.translate("ScriptAlgorithm", "Keep n biggest parts") QCoreApplication.translate("ScriptAlgorithm", "Frequency analysis") QCoreApplication.translate("ScriptAlgorithm", "Fill holes") """ModelerOnlyAlgorithmProvider""" QCoreApplication.translate("CalculatorModelerAlgorithm", "Calculator") QCoreApplication.translate("RasterLayerBoundsAlgorithm", "Raster layer bounds") QCoreApplication.translate("VectorLayerBoundsAlgorithm", "Vector layer bounds") 
# Section: GDAL/OGR provider algorithm names, then the Lidar and OTB section markers.
# The bare triple-quoted strings are section labels only (no-op expression statements);
# the translate() calls below them carry the strings the translation tool extracts.
"""GdalOgrAlgorithmProvider""" QCoreApplication.translate("nearblack", "Near black") QCoreApplication.translate("information", "Information") QCoreApplication.translate("warp", "Reproject raster layer") QCoreApplication.translate("translate", "Export raster layer") QCoreApplication.translate("rgb2pct", "RGB to PCT") QCoreApplication.translate("pct2rgb", "PCT to RGB") QCoreApplication.translate("merge", "Merge raster layers") QCoreApplication.translate("buildvrt", "Build Virtual Raster") QCoreApplication.translate("polygonize", "Vectorize raster layer") QCoreApplication.translate("gdaladdo", "Build overviews (pyramids)") QCoreApplication.translate("ClipByExtent", "Clip raster by extent") QCoreApplication.translate("ClipByMask", "Clip raster by mask layer") QCoreApplication.translate("contour", "Contour lines") QCoreApplication.translate("rasterize", "Rasterize (vector to raster)") QCoreApplication.translate("proximity", "Proximity (raster distance)") QCoreApplication.translate("sieve", "Remove small pixel clumps (nearest neighbour)") QCoreApplication.translate("fillnodata", "Fill nodata") QCoreApplication.translate("ExtractProjection", "Extract projection") QCoreApplication.translate("gdal2xyz", "gdal2xyz") QCoreApplication.translate("hillshade", "Hillshade") QCoreApplication.translate("slope", "Slope") QCoreApplication.translate("aspect", "Aspect") QCoreApplication.translate("tri", "TRI (Terrain Ruggedness Index)") QCoreApplication.translate("tpi", "TPI (Topographic Position Index)") QCoreApplication.translate("roughness", "Roughness") QCoreApplication.translate("ColorRelief", "Color relief") QCoreApplication.translate("GridInvDist", "Interpolate (Inverse distance weighting)") QCoreApplication.translate("GridAverage", "Interpolate (Average)") QCoreApplication.translate("GridNearest", "Interpolate (Nearest Neighbor)") QCoreApplication.translate("GridDataMetrics", "Interpolate (Data metrics)") QCoreApplication.translate("gdaltindex", "Tile Index") 
QCoreApplication.translate("gdalcalc", "Raster calculator") QCoreApplication.translate("rasterize_over", "Rasterize (write over existing raster)") QCoreApplication.translate("OgrInfo", "Information") QCoreApplication.translate("Ogr2Ogr", "Convert format") QCoreApplication.translate("Ogr2OgrClip", "Clip vectors by polygon") QCoreApplication.translate("Ogr2OgrClipExtent", "Clip vectors by extent") QCoreApplication.translate("Ogr2OgrToPostGis", "Import Vector into PostGIS database (new connection)") QCoreApplication.translate("Ogr2OgrToPostGisList", "Import Vector into PostGIS database (available connections)") QCoreApplication.translate("Ogr2OgrPointsOnLines", "Create points along lines") QCoreApplication.translate("Ogr2OgrBuffer", "Buffer vectors") QCoreApplication.translate("Ogr2OgrDissolve", "Dissolve polygons") QCoreApplication.translate("Ogr2OgrOneSideBuffer", "Single sided buffers (and offset lines) for lines") QCoreApplication.translate("Ogr2OgrTableToPostGisList", "Import layer/table as geometryless table into PostgreSQL database") QCoreApplication.translate("OgrSql", "Execute SQL on vector layer") """LidarToolsAlgorithmProvider""" """OTBAlgorithmProvider""" QCoreApplication.translate("OTBAlgorithm", "ExtractROI (standard)") QCoreApplication.translate("OTBAlgorithm", "ComputeModulusAndPhase-one (OneEntry)") QCoreApplication.translate("OTBAlgorithm", "EdgeExtraction (gradient)") QCoreApplication.translate("OTBAlgorithm", "GrayScaleMorphologicalOperation (opening)") QCoreApplication.translate("OTBAlgorithm", "RigidTransformResample (translation)") QCoreApplication.translate("OTBAlgorithm", "Pansharpening (lmvm)") QCoreApplication.translate("OTBAlgorithm", "DimensionalityReduction (napca)") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (knn)") QCoreApplication.translate("OTBAlgorithm", "ExtractROI (fit)") QCoreApplication.translate("OTBAlgorithm", "Smoothing (gaussian)") QCoreApplication.translate("OTBAlgorithm", "Band Math") 
# Section: remaining OTB provider algorithm names, then the R provider section.
# All entries share the "OTBAlgorithm" / "RAlgorithm" translation context; only the
# second argument (the user-visible algorithm name) is extracted for translation.
QCoreApplication.translate("OTBAlgorithm", "Image to KMZ Export") QCoreApplication.translate("OTBAlgorithm", "Smoothing (anidif)") QCoreApplication.translate("OTBAlgorithm", "BinaryMorphologicalOperation (closing)") QCoreApplication.translate("OTBAlgorithm", "Segmentation (watershed)") QCoreApplication.translate("OTBAlgorithm", "EdgeExtraction (sobel)") QCoreApplication.translate("OTBAlgorithm", "Split Image") QCoreApplication.translate("OTBAlgorithm", "ComputeConfusionMatrix (vector)") QCoreApplication.translate("OTBAlgorithm", "Exact Large-Scale Mean-Shift segmentation, step 4") QCoreApplication.translate("OTBAlgorithm", "Segmentation (meanshift)") QCoreApplication.translate("OTBAlgorithm", "RigidTransformResample (id)") QCoreApplication.translate("OTBAlgorithm", "DimensionalityReduction (ica)") QCoreApplication.translate("OTBAlgorithm", "Segmentation (mprofiles)") QCoreApplication.translate("OTBAlgorithm", "Radiometric Indices") QCoreApplication.translate("OTBAlgorithm", "SOM Classification") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (ann)") QCoreApplication.translate("OTBAlgorithm", "Segmentation (cc)") QCoreApplication.translate("OTBAlgorithm", "Connected Component Segmentation") QCoreApplication.translate("OTBAlgorithm", "Image Classification") QCoreApplication.translate("OTBAlgorithm", "Read image information") QCoreApplication.translate("OTBAlgorithm", "Pansharpening (rcs)") QCoreApplication.translate("OTBAlgorithm", "Smoothing (mean)") QCoreApplication.translate("OTBAlgorithm", "BinaryMorphologicalOperation (dilate)") QCoreApplication.translate("OTBAlgorithm", "OrthoRectification (fit-to-ortho)") QCoreApplication.translate("OTBAlgorithm", "Mean Shift filtering (can be used as Exact Large-Scale Mean-Shift segmentation, step 1)") QCoreApplication.translate("OTBAlgorithm", "Images Concatenation") QCoreApplication.translate("OTBAlgorithm", "Line segment detection") QCoreApplication.translate("OTBAlgorithm", "OrthoRectification (epsg)") 
QCoreApplication.translate("OTBAlgorithm", "Exact Large-Scale Mean-Shift segmentation, step 3 (optional)") QCoreApplication.translate("OTBAlgorithm", "FusionOfClassifications (dempstershafer)") QCoreApplication.translate("OTBAlgorithm", "Concatenate") QCoreApplication.translate("OTBAlgorithm", "Stereo Framework") QCoreApplication.translate("OTBAlgorithm", "Exact Large-Scale Mean-Shift segmentation, step 2") QCoreApplication.translate("OTBAlgorithm", "DimensionalityReduction (maf)") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (gbt)") QCoreApplication.translate("OTBAlgorithm", "ColorMapping (image)") QCoreApplication.translate("OTBAlgorithm", "ComputeConfusionMatrix (raster)") QCoreApplication.translate("OTBAlgorithm", "BinaryMorphologicalOperation (erode)") QCoreApplication.translate("OTBAlgorithm", "Hoover compare segmentation") QCoreApplication.translate("OTBAlgorithm", "GrayScaleMorphologicalOperation (dilate)") QCoreApplication.translate("OTBAlgorithm", "Image Envelope") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (rf)") QCoreApplication.translate("OTBAlgorithm", "Haralick Texture Extraction") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (bayes)") QCoreApplication.translate("OTBAlgorithm", "Images comparaison") QCoreApplication.translate("OTBAlgorithm", "Optical calibration") QCoreApplication.translate("OTBAlgorithm", "Segmentation (edison)") QCoreApplication.translate("OTBAlgorithm", "FusionOfClassifications (majorityvoting)") QCoreApplication.translate("OTBAlgorithm", "ColorMapping (custom)") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (boost)") QCoreApplication.translate("OTBAlgorithm", "Classification Map Regularization") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (libsvm)") QCoreApplication.translate("OTBAlgorithm", "Pansharpening (bayes)") QCoreApplication.translate("OTBAlgorithm", "GrayScaleMorphologicalOperation (closing)") 
QCoreApplication.translate("OTBAlgorithm", "Rescale Image") QCoreApplication.translate("OTBAlgorithm", "ColorMapping (optimal)") QCoreApplication.translate("OTBAlgorithm", "DimensionalityReduction (pca)") QCoreApplication.translate("OTBAlgorithm", "Local Statistic Extraction") QCoreApplication.translate("OTBAlgorithm", "Compute Images second order statistics") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (svm)") QCoreApplication.translate("OTBAlgorithm", "RigidTransformResample (rotation)") QCoreApplication.translate("OTBAlgorithm", "GrayScaleMorphologicalOperation (erode)") QCoreApplication.translate("OTBAlgorithm", "Unsupervised KMeans image classification") QCoreApplication.translate("OTBAlgorithm", "ComputeModulusAndPhase-two (TwoEntries)") QCoreApplication.translate("OTBAlgorithm", "TrainImagesClassifier (dt)") QCoreApplication.translate("OTBAlgorithm", "Superimpose sensor") QCoreApplication.translate("OTBAlgorithm", "Image Tile Fusion") QCoreApplication.translate("OTBAlgorithm", "OrthoRectification (utm)") QCoreApplication.translate("OTBAlgorithm", "OrthoRectification (lambert-WGS84)") QCoreApplication.translate("OTBAlgorithm", "EdgeExtraction (touzi)") QCoreApplication.translate("OTBAlgorithm", "Multivariate alteration detector") QCoreApplication.translate("OTBAlgorithm", "ColorMapping (continuous)") QCoreApplication.translate("OTBAlgorithm", "BinaryMorphologicalOperation (opening)") """RAlgorithmProvider""" QCoreApplication.translate("RAlgorithm", "Tobler") QCoreApplication.translate("RAlgorithm", "ACP contribution") QCoreApplication.translate("RAlgorithm", "scatterplot regressione") QCoreApplication.translate("RAlgorithm", "Kernel density estimation") QCoreApplication.translate("RAlgorithm", "Minimum convex polygon") QCoreApplication.translate("RAlgorithm", "CART") QCoreApplication.translate("RAlgorithm", "Summary statistics") QCoreApplication.translate("RAlgorithm", "frequency plot") QCoreApplication.translate("RAlgorithm", "ACP 
individus") QCoreApplication.translate("RAlgorithm", "Distance") QCoreApplication.translate("RAlgorithm", "Kriging") QCoreApplication.translate("RAlgorithm", "scatterplot log") QCoreApplication.translate("RAlgorithm", "Selection with Bayesian Information Criterion") QCoreApplication.translate("RAlgorithm", "Extract points from line") QCoreApplication.translate("RAlgorithm", "ACP var") QCoreApplication.translate("RAlgorithm", "qqplot") QCoreApplication.translate("RAlgorithm", "Krigeage selection") QCoreApplication.translate("RAlgorithm", "Frequency table") QCoreApplication.translate("RAlgorithm", "Advanced raster histogram") QCoreApplication.translate("RAlgorithm", "Density curve") QCoreApplication.translate("RAlgorithm", "CAH") QCoreApplication.translate("RAlgorithm", "reseau voisin") QCoreApplication.translate("RAlgorithm", "Relative distribution (raster covariate)") QCoreApplication.translate("RAlgorithm", "Histogram") QCoreApplication.translate("RAlgorithm", "Random sampling grid") QCoreApplication.translate("RAlgorithm", "selection Cp") QCoreApplication.translate("RAlgorithm", "Quadrat analysis") QCoreApplication.translate("RAlgorithm", "ggplot scatterplot") QCoreApplication.translate("RAlgorithm", "selection adjr2") QCoreApplication.translate("RAlgorithm", "Monte-Carlo spatial randomness") QCoreApplication.translate("RAlgorithm", "Douglas-Peucker with choice") QCoreApplication.translate("RAlgorithm", "Inverse Distance Weigthing with method selection") QCoreApplication.translate("RAlgorithm", "AFC") QCoreApplication.translate("RAlgorithm", "ponderation selection") QCoreApplication.translate("RAlgorithm", "alpha shape") QCoreApplication.translate("RAlgorithm", "a-star") QCoreApplication.translate("RAlgorithm", "Ripley - Rasson spatial domain") QCoreApplication.translate("RAlgorithm", "Selection Cp") QCoreApplication.translate("RAlgorithm", "Selection with criterion choice") QCoreApplication.translate("RAlgorithm", "Alpha shape") 
# Section: remaining R provider algorithm names (mix of English and French labels as
# shipped by the R script authors), then the SAGA and GRASS section markers.
QCoreApplication.translate("RAlgorithm", "Close neighbor") QCoreApplication.translate("RAlgorithm", "Kolmogrov-Smirnov test") QCoreApplication.translate("RAlgorithm", "Droite") QCoreApplication.translate("RAlgorithm", "regression multiple") QCoreApplication.translate("RAlgorithm", "Douglas-Peucker") QCoreApplication.translate("RAlgorithm", "ponderation") QCoreApplication.translate("RAlgorithm", "regression") QCoreApplication.translate("RAlgorithm", "A-star") QCoreApplication.translate("RAlgorithm", "Kriging with model selection") QCoreApplication.translate("RAlgorithm", "Courbe densite") QCoreApplication.translate("RAlgorithm", "kernel") QCoreApplication.translate("RAlgorithm", "selection critere") QCoreApplication.translate("RAlgorithm", "ANOVA") QCoreApplication.translate("RAlgorithm", "F function") QCoreApplication.translate("RAlgorithm", "Krigeage") QCoreApplication.translate("RAlgorithm", "Multiple Regression") QCoreApplication.translate("RAlgorithm", "Simple Linear Regression") QCoreApplication.translate("RAlgorithm", "scatterplot types") QCoreApplication.translate("RAlgorithm", "Polygone") QCoreApplication.translate("RAlgorithm", "Autocor spatiale") QCoreApplication.translate("RAlgorithm", "G function") QCoreApplication.translate("RAlgorithm", "Selection with r2") QCoreApplication.translate("RAlgorithm", "Inverse Distance Weigthing") QCoreApplication.translate("RAlgorithm", "douglas choix-dept") QCoreApplication.translate("RAlgorithm", "Selection with r2 adjusted") QCoreApplication.translate("RAlgorithm", "Raster histogram") QCoreApplication.translate("RAlgorithm", "selection r2") QCoreApplication.translate("RAlgorithm", "AFDM") QCoreApplication.translate("RAlgorithm", "selection BIC") QCoreApplication.translate("RAlgorithm", "Relative distribution (distance covariate)") QCoreApplication.translate("RAlgorithm", "Regular sampling grid") QCoreApplication.translate("RAlgorithm", "ACP cercle") """SagaAlgorithmProvider""" """GrassAlgorithmProvider""" 
# Section: GRASS provider algorithm names (part 1). Each string is the GRASS module
# id followed by " - " and its one-line description, shown verbatim in the toolbox.
QCoreApplication.translate("GrassAlgorithm", "r.thin - Thins non-zero cells that denote linear features in a raster layer.") QCoreApplication.translate("GrassAlgorithm", "v.hull - Produces a convex hull for a given vector map.") QCoreApplication.translate("GrassAlgorithm", "r.mfilter.fp - Raster map matrix filter.") QCoreApplication.translate("GrassAlgorithm", "r.horizon - Horizon angle computation from a digital elevation model.") QCoreApplication.translate("GrassAlgorithm", "r.terraflow - Flow computation for massive grids (float version).") QCoreApplication.translate("GrassAlgorithm", "r.fill.dir - Filters and generates a depressionless elevation layer and a flow direction layer from a given elevation raster layer.") QCoreApplication.translate("GrassAlgorithm", "r.coin - Tabulates the mutual occurrence (coincidence) of categories for two raster map layers.") QCoreApplication.translate("GrassAlgorithm", "v.surf.rst.line - Spatial approximation and topographic analysis using regularized spline with tension.") QCoreApplication.translate("GrassAlgorithm", "v.clean.advanced - Toolset for cleaning topology of vector map (Advanced).") QCoreApplication.translate("GrassAlgorithm", "v.lidar.correction - Correction of the v.lidar.growing output. 
It is the last of the three algorithms for LIDAR filtering.") QCoreApplication.translate("GrassAlgorithm", "r.out.gridatb - Exports GRASS raster map to GRIDATB.FOR map file (TOPMODEL)") QCoreApplication.translate("GrassAlgorithm", "v.surf.rst - Spatial approximation and topographic analysis using regularized spline with tension.") QCoreApplication.translate("GrassAlgorithm", "r.terraflow.short - Flow computation for massive grids (integer version).") QCoreApplication.translate("GrassAlgorithm", "r.univar - Calculates univariate statistics from the non-null cells of a raster map.") QCoreApplication.translate("GrassAlgorithm", "r.slope - Generates raster maps of slope from a elevation raster map.") QCoreApplication.translate("GrassAlgorithm", "r.random.cells - Generates random cell values with spatial dependence.") QCoreApplication.translate("GrassAlgorithm", "v.transform - Performs an affine transformation on a vector layer.") QCoreApplication.translate("GrassAlgorithm", "v.dissolve - Dissolves boundaries between adjacent areas sharing a common category number or attribute.") QCoreApplication.translate("GrassAlgorithm", "v.lidar.growing - Building contour determination and Region Growing algorithm for determining the building inside") QCoreApplication.translate("GrassAlgorithm", "v.extract - Selects vector objects from a vector layer a new layer containing only the selected objects.") QCoreApplication.translate("GrassAlgorithm", "nviz - Visualization and animation tool for GRASS data.") QCoreApplication.translate("GrassAlgorithm", "r.kappa - Calculate error matrix and kappa parameter for accuracy assessment of classification result.") QCoreApplication.translate("GrassAlgorithm", "r.contour.step - Create vector contours from raster at specified steps") QCoreApplication.translate("GrassAlgorithm", "r.average - Finds the average of values in a cover raster layer within areas assigned the same category value in a user-specified base layer.") 
QCoreApplication.translate("GrassAlgorithm", "v.buffer.distance - Creates a buffer around features of given type.") QCoreApplication.translate("GrassAlgorithm", "r.rescale - Rescales the range of category values in a raster layer.") QCoreApplication.translate("GrassAlgorithm", "r.reclass - Creates a new map layer whose category values are based upon a reclassification of the categories in an existing raster map layer.") QCoreApplication.translate("GrassAlgorithm", "v.kernel - Generates a raster density map from vector point data using a moving kernel or optionally generates a vector density map on a vector network.") QCoreApplication.translate("GrassAlgorithm", "v.distance - Finds the nearest element in vector map 'to' for elements in vector map 'from'.") QCoreApplication.translate("GrassAlgorithm", "r.grow.distance - Generates a raster layer of distance to features in input layer.") QCoreApplication.translate("GrassAlgorithm", "v.surf.bspline.lambda - Bicubic or bilinear spline interpolation with Tykhonov regularization.") QCoreApplication.translate("GrassAlgorithm", "r.mode - Finds the mode of values in a cover layer within areas assigned the same category value in a user-specified base layer.") QCoreApplication.translate("GrassAlgorithm", "r.regression.line - Calculates linear regression from two raster layers : y = a + b*x.") QCoreApplication.translate("GrassAlgorithm", "r.topidx - Creates topographic index layer from elevation raster layer") QCoreApplication.translate("GrassAlgorithm", "r.plane - Creates raster plane layer given dip (inclination), aspect (azimuth) and one point.") QCoreApplication.translate("GrassAlgorithm", "r.mapcalculator - Calculate new raster map from a r.mapcalc expression.") QCoreApplication.translate("GrassAlgorithm", "v.info - Outputs basic information about a user-specified vector map.") QCoreApplication.translate("GrassAlgorithm", "v.segment - Creates points/segments from input vector lines and positions.") 
# Section: GRASS provider algorithm names (part 2). Same pattern as above: the
# "GrassAlgorithm" context with "<module> - <description>" display strings.
QCoreApplication.translate("GrassAlgorithm", "v.drape - Converts vector map to 3D by sampling of elevation raster map.") QCoreApplication.translate("GrassAlgorithm", "r.cost.full - Creates a raster layer of cumulative cost of moving across a raster layer whose cell values represent cost.") QCoreApplication.translate("GrassAlgorithm", "i.ifft - Inverse Fast Fourier Transform (IFFT) for image processing.") QCoreApplication.translate("GrassAlgorithm", "r.circle - Creates a raster map containing concentric rings around a given point.") QCoreApplication.translate("GrassAlgorithm", "r.water.outlet - Watershed basin creation program.") QCoreApplication.translate("GrassAlgorithm", "r.resample - GRASS raster map layer data resampling capability using nearest neighbors.") QCoreApplication.translate("GrassAlgorithm", "v.sample - Samples a raster layer at vector point locations.") QCoreApplication.translate("GrassAlgorithm", "r.quantile - Compute quantiles using two passes.") QCoreApplication.translate("GrassAlgorithm", "r.statistics - Calculates category or object oriented statistics.") QCoreApplication.translate("GrassAlgorithm", "v.reclass - Changes vector category values for an existing vector map according to results of SQL queries or a value in attribute table column.") QCoreApplication.translate("GrassAlgorithm", "r.out.ppm - Converts a raster layer to a PPM image file at the pixel resolution of the currently defined region.") QCoreApplication.translate("GrassAlgorithm", "r.resamp.stats - Resamples raster layers to a coarser grid using aggregation.") QCoreApplication.translate("GrassAlgorithm", "i.zc - Zero-crossing \"edge detection\" raster function for image processing.") QCoreApplication.translate("GrassAlgorithm", "r.lake.coords - Fills lake at given point to given level.") QCoreApplication.translate("GrassAlgorithm", "r.surf.contour - Surface generation program from rasterized contours.") QCoreApplication.translate("GrassAlgorithm", "v.kcv - Randomly partition 
points into test/train sets.") QCoreApplication.translate("GrassAlgorithm", "v.out.pov - Converts to POV-Ray format, GRASS x,y,z -> POV-Ray x,z,y") QCoreApplication.translate("GrassAlgorithm", "v.report - Reports geometry statistics for vectors.") QCoreApplication.translate("GrassAlgorithm", "r.out.xyz - Export a raster map to a text file as x,y,z values based on cell centers") QCoreApplication.translate("GrassAlgorithm", "r.carve - Takes vector stream data, transforms it to raster and subtracts depth from the output DEM.") QCoreApplication.translate("GrassAlgorithm", "v.to.rast.value - Converts (rasterize) a vector layer into a raster layer.") QCoreApplication.translate("GrassAlgorithm", "v.outlier - Removes outliers from vector point data.") QCoreApplication.translate("GrassAlgorithm", "r.surf.random - Produces a raster layer of uniform random deviates whose range can be expressed by the user.") QCoreApplication.translate("GrassAlgorithm", "r.resamp.rst - Reinterpolates using regularized spline with tension and smoothing.") QCoreApplication.translate("GrassAlgorithm", "r.bitpattern - Compares bit patterns with a raster map.") QCoreApplication.translate("GrassAlgorithm", "r.covar - Outputs a covariance/correlation matrix for user-specified raster layer(s).") QCoreApplication.translate("GrassAlgorithm", "r.shaded.relief - Creates shaded relief from an elevation layer (DEM).") QCoreApplication.translate("GrassAlgorithm", "v.generalize - Vector based generalization.") QCoreApplication.translate("GrassAlgorithm", "v.distance.toattr - Finds the nearest element in vector map 'to' for elements in vector map 'from'.") QCoreApplication.translate("GrassAlgorithm", "r.info - Output basic information about a raster layer.") QCoreApplication.translate("GrassAlgorithm", "r.his - Generates red, green and blue raster layers combining hue, intensity and saturation (HIS) values from user-specified input raster layers.") QCoreApplication.translate("GrassAlgorithm", "r.aspect - 
Generates raster maps of aspect from a elevation raster map.") QCoreApplication.translate("GrassAlgorithm", "v.split.length - Split lines to shorter segments by length.") QCoreApplication.translate("GrassAlgorithm", "r.sim.sediment - Sediment transport and erosion/deposition simulation using path sampling method (SIMWE).") QCoreApplication.translate("GrassAlgorithm", "r.patch - Creates a composite raster layer by using one (or more) layer(s) to fill in areas of \"no data\" in another map layer.") QCoreApplication.translate("GrassAlgorithm", "r.reclass.area.greater - Reclassifies a raster layer, selecting areas larger than a user specified size") QCoreApplication.translate("GrassAlgorithm", "r.horizon.height - Horizon angle computation from a digital elevation model.") QCoreApplication.translate("GrassAlgorithm", "r.sun - Solar irradiance and irradiation model.") QCoreApplication.translate("GrassAlgorithm", "v.clean - Toolset for cleaning topology of vector map.") QCoreApplication.translate("GrassAlgorithm", "r.recode - Recodes categorical raster maps.") QCoreApplication.translate("GrassAlgorithm", "v.parallel - Creates parallel line to input vector lines.") QCoreApplication.translate("GrassAlgorithm", "v.random - Randomly generate a 2D/3D vector points map.") QCoreApplication.translate("GrassAlgorithm", "r.describe - Prints terse list of category values found in a raster layer.") QCoreApplication.translate("GrassAlgorithm", "r.surf.gauss - Creates a raster layer of Gaussian deviates.") QCoreApplication.translate("GrassAlgorithm", "v.normal - Tests for normality for points.") QCoreApplication.translate("GrassAlgorithm", "i.his.rgb - Transforms raster maps from HIS (Hue-Intensity-Saturation) color space to RGB (Red-Green-Blue) color space.") QCoreApplication.translate("GrassAlgorithm", "r.report - Reports statistics for raster layers.") QCoreApplication.translate("GrassAlgorithm", "r.series - Makes each output cell value a function of the values assigned to the 
corresponding cells in the input raster layers.") QCoreApplication.translate("GrassAlgorithm", "m.cogo - A simple utility for converting bearing and distance measurements to coordinates and vice versa. It assumes a cartesian coordinate system") QCoreApplication.translate("GrassAlgorithm", "i.atcorr - Performs atmospheric correction using the 6S algorithm.") QCoreApplication.translate("GrassAlgorithm", "r.mfilter - Performs raster map matrix filter.") QCoreApplication.translate("GrassAlgorithm", "r.lake.layer - Fills lake at given point to given level.") QCoreApplication.translate("GrassAlgorithm", "r.drain.coordinate - Traces a flow through an elevation model on a raster map.") QCoreApplication.translate("GrassAlgorithm", "r.to.vect - Converts a raster into a vector layer.") QCoreApplication.translate("GrassAlgorithm", "v.voronoi - Creates a Voronoi diagram from an input vector layer containing points.") QCoreApplication.translate("GrassAlgorithm", "v.transform.pointsfile - Performs an affine transformation on a vector layer, using a support point file.") QCoreApplication.translate("GrassAlgorithm", "v.neighbors - Makes each cell value a function of attribute values and stores in an output raster map.") QCoreApplication.translate("GrassAlgorithm", "r.basins.fill - Generates watershed subbasins raster map.") QCoreApplication.translate("GrassAlgorithm", "r.flow - Construction of slope curves (flowlines), flowpath lengths, and flowline densities (upslope areas) from a raster digital elevation model (DEM).") QCoreApplication.translate("GrassAlgorithm", "r.cross - Creates a cross product of the category values from multiple raster map layers.") QCoreApplication.translate("GrassAlgorithm", "v.mkgrid - Creates a GRASS vector layer of a user-defined grid.") QCoreApplication.translate("GrassAlgorithm", "v.out.dxf - Exports GRASS vector map layers to DXF file format.") QCoreApplication.translate("GrassAlgorithm", "r.neighbors - Makes each cell category value a function of 
the category values assigned to the cells around it") QCoreApplication.translate("GrassAlgorithm", "r.median - Finds the median of values in a cover layer within areas assigned the same category value in a user-specified base layer.") QCoreApplication.translate("GrassAlgorithm", "r.cost.full.raster - Creates a raster layer of cumulative cost of moving across a raster layer whose cell values represent cost.") QCoreApplication.translate("GrassAlgorithm", "r.watershed - Watershed basin analysis program.") QCoreApplication.translate("GrassAlgorithm", "r.gwflow - Numerical calculation program for transient, confined and unconfined groundwater flow in two dimensions.") QCoreApplication.translate("GrassAlgorithm", "r.buffer - Creates a raster map layer showing buffer zones surrounding cells that contain non-NULL category values.") QCoreApplication.translate("GrassAlgorithm", "r.clump - Recategorizes data in a raster map by grouping cells that form physically discrete areas into unique categories.") QCoreApplication.translate("GrassAlgorithm", "v.buffer.column - Creates a buffer around features of given type.") QCoreApplication.translate("GrassAlgorithm", "i.fft - Fast Fourier Transform (FFT) for image processing.") QCoreApplication.translate("GrassAlgorithm", "r.spreadpath - Recursively traces the least cost path backwards to cells from which the cumulative cost was determined.") QCoreApplication.translate("GrassAlgorithm", "r.profile - Outputs the raster layer values lying on user-defined line(s).") QCoreApplication.translate("GrassAlgorithm", "v.to.rast.attribute - Converts (rasterize) a vector layer into a raster layer.") QCoreApplication.translate("GrassAlgorithm", "r.param.scale - Extracts terrain parameters from a DEM.") QCoreApplication.translate("GrassAlgorithm", "v.db.select - Prints vector map attributes") QCoreApplication.translate("GrassAlgorithm", "r.sunmask - Calculates cast shadow areas from sun position and elevation raster map.") 
QCoreApplication.translate("GrassAlgorithm", "v.surf.rst.cvdev.line - Spatial approximation and topographic analysis using regularized spline with tension.") QCoreApplication.translate("GrassAlgorithm", "r.random.raster - Create random raster") QCoreApplication.translate("GrassAlgorithm", "v.surf.bspline - Bicubic or bilinear spline interpolation with Tykhonov regularization.") QCoreApplication.translate("GrassAlgorithm", "r.rescale.eq - Rescales histogram equalized the range of category values in a raster layer.") QCoreApplication.translate("GrassAlgorithm", "r.cost - Creates a raster layer of cumulative cost of moving across a raster layer whose cell values represent cost.") QCoreApplication.translate("GrassAlgorithm", "v.surf.idw - Surface interpolation from vector point data by Inverse Distance Squared Weighting.") QCoreApplication.translate("GrassAlgorithm", "r.bilinear - Bilinear interpolation utility for raster map layers.") QCoreApplication.translate("GrassAlgorithm", "r.resamp.interp - Resamples a raster map layer to a finer grid using interpolation.") QCoreApplication.translate("GrassAlgorithm", "r.out.vrml - Export a raster layer to the Virtual Reality Modeling Language (VRML)") QCoreApplication.translate("GrassAlgorithm", "r.volume - Calculates the volume of data \"clumps\".") QCoreApplication.translate("GrassAlgorithm", "r.los - Line-of-sight raster analysis program.") QCoreApplication.translate("GrassAlgorithm", "v.patch - Create a new vector map layer by combining other vector map layers.") QCoreApplication.translate("GrassAlgorithm", "r.random - Creates a raster layer and vector point map containing randomly located points.") QCoreApplication.translate("GrassAlgorithm", "i.rgb.his - Transforms raster maps from RGB (Red-Green-Blue) color space to HIS (Hue-Intensity-Saturation) color space.") QCoreApplication.translate("GrassAlgorithm", "r.composite - Combines red, green and blue raster maps into a single composite raster map.") 
QCoreApplication.translate("GrassAlgorithm", "v.delaunay - Creates a Delaunay triangulation from an input vector map containing points or centroids.") QCoreApplication.translate("GrassAlgorithm", "r.contour.level - Create vector contour from raster at specified levels") QCoreApplication.translate("GrassAlgorithm", "v.univar - Calculates univariate statistics for attribute. Variance and standard deviation is calculated only for points if specified.") QCoreApplication.translate("GrassAlgorithm", "r.walk - Outputs a raster layer showing the anisotropic cumulative cost of moving based on friction cost.") QCoreApplication.translate("GrassAlgorithm", "v.class - Classifies attribute data, e.g. for thematic mapping.") QCoreApplication.translate("GrassAlgorithm", "r.spread - Simulates elliptically anisotropic spread on a graphics window and generates a raster map of the cumulative time of spread, given raster maps containing the rates of spread (ROS), the ROS directions and the spread origins.") QCoreApplication.translate("GrassAlgorithm", "v.overlay - Overlays two vector maps.") QCoreApplication.translate("GrassAlgorithm", "r.surf.idw2 - Surface generation.") QCoreApplication.translate("GrassAlgorithm", "v.in.dxf - Converts files in DXF format to GRASS vector map format.") QCoreApplication.translate("GrassAlgorithm", "r.drain - Traces a flow through an elevation model on a raster map.") QCoreApplication.translate("GrassAlgorithm", "r.sum - Sums up the raster cell values.") QCoreApplication.translate("GrassAlgorithm", "r.slope.aspect - Generates raster layers of slope, aspect, curvatures and partial derivatives from a elevation raster layer.") QCoreApplication.translate("GrassAlgorithm", "v.surf.bspline.sparse - Bicubic or bilinear spline interpolation with Tykhonov regularization.") QCoreApplication.translate("GrassAlgorithm", "r.grow - Generates a raster layer with contiguous areas grown by one cell.") QCoreApplication.translate("GrassAlgorithm", "v.qcount - Indices for 
quadrant counts of sites lists.") QCoreApplication.translate("GrassAlgorithm", "r.quant - Produces the quantization file for a floating-point map.") QCoreApplication.translate("GrassAlgorithm", "r.fillnulls - Fills no-data areas in a raster layer using v.surf.rst splines interpolation or v.surf.bspline interpolation") QCoreApplication.translate("GrassAlgorithm", "v.perturb - Random location perturbations of GRASS vector points") QCoreApplication.translate("GrassAlgorithm", "r.stats - Generates area statistics for raster layers.") QCoreApplication.translate("GrassAlgorithm", "r.sim.water - Overland flow hydrologic simulation using path sampling method (SIMWE).") QCoreApplication.translate("GrassAlgorithm", "v.to.points - Create points along input lines") QCoreApplication.translate("GrassAlgorithm", "r.reclass.area.lesser - Reclassifies a raster layer, selecting areas lower than a user specified size") QCoreApplication.translate("GrassAlgorithm", "v.split.vert - Split lines to shorter segments by max number of vertices.") QCoreApplication.translate("GrassAlgorithm", "v.surf.rst.cvdev - Spatial approximation and topographic analysis using regularized spline with tension.") QCoreApplication.translate("GrassAlgorithm", "r.ros - Generates three, or four raster map layers showing 1) the base (perpendicular) rate of spread (ROS), 2) the maximum (forward) ROS, 3) the direction of the maximum ROS, and optionally 4) the maximum potential spotting distance.") QCoreApplication.translate("GrassAlgorithm", "v.lidar.edgedetection - Detects the object's edges from a LIDAR data set.") QCoreApplication.translate("GrassAlgorithm", "r.surf.idw - Surface interpolation utility for raster layers.") QCoreApplication.translate("GrassAlgorithm", "v.select - Selects features from vector map (A) by features from other vector map (B).") QCoreApplication.translate("GrassAlgorithm", "r.surf.area - Surface area estimation for rasters.") QCoreApplication.translate("GrassAlgorithm", "v.in.wfs - 
Import GetFeature from WFS") QCoreApplication.translate("nviz", "nviz") """Grass7AlgorithmProvider""" """ScriptAlgorithmProvider""" QCoreApplication.translate("ScriptAlgorithm", "Number of unique values in classes") QCoreApplication.translate("ScriptAlgorithm", "Hex grid from layer bounds") QCoreApplication.translate("ScriptAlgorithm", "Batch string replace via regex dictionary") QCoreApplication.translate("ScriptAlgorithm", "test help") QCoreApplication.translate("ScriptAlgorithm", "Points on touching lines") QCoreApplication.translate("ScriptAlgorithm", "Create rasters from canvas for each vector feature extent") QCoreApplication.translate("ScriptAlgorithm", "Points on crossing lines") QCoreApplication.translate("ScriptAlgorithm", "Points from vector") QCoreApplication.translate("ScriptAlgorithm", "Square grid from layer extent") QCoreApplication.translate("ScriptAlgorithm", "Create vector layer from SQL Query") QCoreApplication.translate("ScriptAlgorithm", "Read file content into string") QCoreApplication.translate("ScriptAlgorithm", "Assing predominant category") QCoreApplication.translate("ScriptAlgorithm", "Extract raster values to shapefile") QCoreApplication.translate("ScriptAlgorithm", "Buffer Contour") QCoreApplication.translate("ScriptAlgorithm", "Save features filtered by expression") QCoreApplication.translate("ScriptAlgorithm", "Extract raster values to CSV") QCoreApplication.translate("ScriptAlgorithm", "Save selected features") QCoreApplication.translate("ScriptAlgorithm", "Create vector layer from postgis table") QCoreApplication.translate("ScriptAlgorithm", "Keep n biggest parts") QCoreApplication.translate("ScriptAlgorithm", "Frequency analysis") QCoreApplication.translate("ScriptAlgorithm", "Cut by field") QCoreApplication.translate("ScriptAlgorithm", "Define 1 vector layer properties") QCoreApplication.translate("ScriptAlgorithm", "classification by decision tree") QCoreApplication.translate("ScriptAlgorithm", "Split vector layer by attribute") 
QCoreApplication.translate("ScriptAlgorithm", "CSV RGB or HEX to categorized style") QCoreApplication.translate("ScriptAlgorithm", "Contour") QCoreApplication.translate("ScriptAlgorithm", "Chainage") QCoreApplication.translate("ScriptAlgorithm", "Remove parts") QCoreApplication.translate("ScriptAlgorithm", "Summarize") QCoreApplication.translate("ScriptAlgorithm", "CSV R-G-B to categorized style") QCoreApplication.translate("ScriptAlgorithm", "pygraticule") QCoreApplication.translate("ScriptAlgorithm", "Define 1 raster layer properties") QCoreApplication.translate("ScriptAlgorithm", "Create tiling from vector layer") QCoreApplication.translate("ScriptAlgorithm", "Define multiple vector layers properties") QCoreApplication.translate("ScriptAlgorithm", "Set multiple raster layers properties") QCoreApplication.translate("ScriptAlgorithm", "Fill holes") QCoreApplication.translate("ScriptAlgorithm", "Unique values count") """TauDEMAlgorithmProvider""" """ModelerAlgorithmProvider""" """Groups and subgroups""" QCoreApplication.translate("AlgorithmClassification", "User scripts") QCoreApplication.translate("AlgorithmClassification", "Statistics") QCoreApplication.translate("AlgorithmClassification", "Vector overlay tools") QCoreApplication.translate("AlgorithmClassification", "Overlay") QCoreApplication.translate("AlgorithmClassification", "[GDAL] Miscellaneous") QCoreApplication.translate("AlgorithmClassification", "Feature Extraction") QCoreApplication.translate("AlgorithmClassification", "Raster -> Vector") QCoreApplication.translate("AlgorithmClassification", "Miscellaneous") QCoreApplication.translate("AlgorithmClassification", "Vector geometry tools") QCoreApplication.translate("AlgorithmClassification", "Vector selection tools") QCoreApplication.translate("AlgorithmClassification", "Vector Data Manipulation") QCoreApplication.translate("AlgorithmClassification", "[OGR] Conversion") QCoreApplication.translate("AlgorithmClassification", "Imagery (i.*)") 
QCoreApplication.translate("AlgorithmClassification", "Point pattern analysis") QCoreApplication.translate("AlgorithmClassification", "My scripts") QCoreApplication.translate("AlgorithmClassification", "Modeler-only tools") QCoreApplication.translate("AlgorithmClassification", "Home Range Analysis") QCoreApplication.translate("AlgorithmClassification", "Vector (v.*)") QCoreApplication.translate("AlgorithmClassification", "Miscellaneous (m.*)") QCoreApplication.translate("AlgorithmClassification", "Visualization(NVIZ)") QCoreApplication.translate("AlgorithmClassification", "Segmentation") QCoreApplication.translate("AlgorithmClassification", "Vector table tools") QCoreApplication.translate("AlgorithmClassification", "Vector creation tools") QCoreApplication.translate("AlgorithmClassification", "Raster - vector") QCoreApplication.translate("AlgorithmClassification", "Viewsheds\Lighting") QCoreApplication.translate("AlgorithmClassification", "Calibration") QCoreApplication.translate("AlgorithmClassification", "Classification by decision tree") QCoreApplication.translate("AlgorithmClassification", "Vector -> Raster") QCoreApplication.translate("AlgorithmClassification", "Raster general tools") QCoreApplication.translate("AlgorithmClassification", "[OGR] Miscellaneous") QCoreApplication.translate("AlgorithmClassification", "[OGR] Geoprocessing") QCoreApplication.translate("AlgorithmClassification", "Image Manipulation") QCoreApplication.translate("AlgorithmClassification", "Raster tools") QCoreApplication.translate("AlgorithmClassification", "Graphics") QCoreApplication.translate("AlgorithmClassification", "Plots") QCoreApplication.translate("AlgorithmClassification", "Image Filtering") QCoreApplication.translate("AlgorithmClassification", "[GDAL] Analysis") QCoreApplication.translate("AlgorithmClassification", "Raster") QCoreApplication.translate("AlgorithmClassification", "Stereo") QCoreApplication.translate("AlgorithmClassification", "Vector processing") 
QCoreApplication.translate("AlgorithmClassification", "Vector analysis tools") QCoreApplication.translate("AlgorithmClassification", "[GDAL] Projections") QCoreApplication.translate("AlgorithmClassification", "Database") QCoreApplication.translate("AlgorithmClassification", "Geometry") QCoreApplication.translate("AlgorithmClassification", "Terrain analysis and geomorphometry") QCoreApplication.translate("AlgorithmClassification", "Style") QCoreApplication.translate("AlgorithmClassification", "Domain specific") QCoreApplication.translate("AlgorithmClassification", "Lines") QCoreApplication.translate("AlgorithmClassification", "Analysis") QCoreApplication.translate("AlgorithmClassification", "[GDAL] Extraction") QCoreApplication.translate("AlgorithmClassification", "Vector") QCoreApplication.translate("AlgorithmClassification", "Table") QCoreApplication.translate("AlgorithmClassification", "Basic statistics") QCoreApplication.translate("AlgorithmClassification", "Vector general tools") QCoreApplication.translate("AlgorithmClassification", "Modeler") QCoreApplication.translate("AlgorithmClassification", "Vector_layer") QCoreApplication.translate("AlgorithmClassification", "Selection") QCoreApplication.translate("AlgorithmClassification", "Raster (r.*)") QCoreApplication.translate("AlgorithmClassification", "[GDAL] Conversion") QCoreApplication.translate("AlgorithmClassification", "Modeler tools") QCoreApplication.translate("AlgorithmClassification", "Creation") QCoreApplication.translate("AlgorithmClassification", "Table tools") QCoreApplication.translate("AlgorithmClassification", "Edition") QCoreApplication.translate("AlgorithmClassification", "Points") QCoreApplication.translate("AlgorithmClassification", "Utils") QCoreApplication.translate("AlgorithmClassification", "Raster processing") QCoreApplication.translate("AlgorithmClassification", "Learning") QCoreApplication.translate("AlgorithmClassification", "Images") 
QCoreApplication.translate("AlgorithmClassification", "Geometry operations") QCoreApplication.translate("AlgorithmClassification", "General tools") QCoreApplication.translate("AlgorithmClassification", "Polygons") ```
[ { "content": "Here is a code file:\n```python\n# -*- coding: utf8 -*-\r\n\r\nimport sys, os\r\nsys.path.append(os.path.abspath('.'))\r\n\r\nimport re\r\n\r\nfrom operator import attrgetter\r\nimport difflib\r\n\r\n# Pylons model init sequence\r\nimport pylons.test\r\nimport logging\r\n\r\nfrom quanthistling.con...
[ { "content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf8 -*-\r\n\r\nimport sys, os\r\nsys.path.append(os.path.abspath('.'))\r\n\r\nimport re\r\n\r\nfrom operator import attrgetter\r\nimport difflib\r\n\r\n# Pylons model init sequence\r\nimport pylons.test\r\nimport logging\r\n\r\nfrom q...
```python # -*- coding: utf8 -*- import sys, os sys.path.append(os.path.abspath('.')) import re from operator import attrgetter import difflib # Pylons model init sequence import pylons.test import logging from quanthistling.config.environment import load_environment from quanthistling.model.meta import Session, metadata from quanthistling import model import quanthistling.dictdata.books from paste.deploy import appconfig import functions def annotate_crossrefs(entry): # delete head annotations crossreference_annotations = [ a for a in entry.annotations if a.value=='crossreference'] for a in crossreference_annotations: Session.delete(a) crossreference_match = re.search(u"Vea ([^.]*)(?:\.|$)", entry.fullentry) if crossreference_match: entry.append_annotation(crossreference_match.start(1), crossreference_match.end(1), u'crossreference', u'dictinterpretation') def annotate_head(entry): # delete head annotations head_annotations = [ a for a in entry.annotations if a.value=='head' or a.value=="iso-639-3" or a.value=="doculect"] for a in head_annotations: Session.delete(a) heads = [] head_end_pos = functions.get_last_bold_pos_at_start(entry) head_start_pos = 0 substr = entry.fullentry[head_start_pos:head_end_pos] start = head_start_pos for match in re.finditer(r'(?:, ?|$)', substr): end = match.start(0) inserted_head = functions.insert_head(entry, start, end) heads.append(inserted_head) start = match.end(0) return heads def annotate_pos(entry): # delete pos annotations pos_annotations = [ a for a in entry.annotations if a.value=='pos'] for a in pos_annotations: Session.delete(a) head_end_pos = functions.get_last_bold_pos_at_start(entry) re_bracket = re.compile(u"\(.*?\)") match_bracket = re_bracket.search(entry.fullentry, head_end_pos) if match_bracket and match_bracket.start(0) < (head_end_pos + 2): entry.append_annotation(match_bracket.start(0)+1, match_bracket.end(0)-1, u'pos', u'dictinterpretation') def annotate_translations(entry): # delete pos annotations 
trans_annotations = [ a for a in entry.annotations if a.value=='translation'] for a in trans_annotations: Session.delete(a) translations_start = functions.get_pos_or_head_end(entry) + 1 translations_end = len(entry.fullentry) if re.match(u"\.? ?Vea ", entry.fullentry[translations_start:]): return first_bold_after_pos = functions.get_first_bold_start_in_range(entry, translations_start, translations_end) if first_bold_after_pos != -1: translations_end = first_bold_after_pos start = translations_start for match in re.finditer(u"(?:[,;] ?|$)", entry.fullentry[translations_start:translations_end]): mybreak = False # are we in a bracket? for m in re.finditer(r'\(.*?\)', entry.fullentry[translations_start:translations_end]): if match.start(0) >= m.start(0) and match.end(0) <= m.end(0): mybreak = True if not mybreak: end = match.start(0) + translations_start subsubstr = entry.fullentry[start:end] if not(re.match(r"\s*$", subsubstr)): functions.insert_translation(entry, start, end) start = match.end(0) + translations_start def annotate_examples(entry): # delete example annotations ex_annotations = [ a for a in entry.annotations if a.value=='example-src' or a.value=='example-tgt'] for a in ex_annotations: Session.delete(a) after_head_or_pos = functions.get_pos_or_head_end(entry) + 1 if re.match(u"\.? 
?Vea ", entry.fullentry[after_head_or_pos:]): return first_bold_after_pos = functions.get_first_bold_start_in_range(entry, after_head_or_pos, len(entry.fullentry)) if first_bold_after_pos == -1: return sorted_annotations = [ a for a in entry.annotations if a.value=='bold' and a.start > after_head_or_pos ] sorted_annotations = sorted(sorted_annotations, key=attrgetter('start')) i = 0 start_annotation = i end_annotation = i while i < len(sorted_annotations): # concat successive annotations next = False if ( i < (len(sorted_annotations))-1 ): if ((sorted_annotations[i].end == sorted_annotations[i+1].start) or (sorted_annotations[i].end == (sorted_annotations[i+1].start-1))): end_annotation = i + 1 next = True if not next: # is there another bold annotation after this one? if end_annotation < (len(sorted_annotations)-1): entry.append_annotation(sorted_annotations[start_annotation].start, sorted_annotations[end_annotation].end, u'example-src', u'dictinterpretation') entry.append_annotation(sorted_annotations[end_annotation].end, sorted_annotations[end_annotation+1].start, u'example-tgt', u'dictinterpretation') else: entry.append_annotation(sorted_annotations[start_annotation].start, sorted_annotations[end_annotation].end, u'example-src', u'dictinterpretation') entry.append_annotation(sorted_annotations[end_annotation].end, len(entry.fullentry), u'example-tgt', u'dictinterpretation') start_annotation = i + 1 end_annotation = i + 1 i = i + 1 def main(argv): bibtex_key = u"shell1987" if len(argv) < 2: print "call: annotations_for%s.py ini_file" % bibtex_key exit(1) ini_file = argv[1] conf = appconfig('config:' + ini_file, relative_to='.') if not pylons.test.pylonsapp: load_environment(conf.global_conf, conf.local_conf) # Create the tables if they don't already exist metadata.create_all(bind=Session.bind) dictdatas = Session.query(model.Dictdata).join( (model.Book, model.Dictdata.book_id==model.Book.id) ).filter(model.Book.bibtex_key==bibtex_key).all() for dictdata in 
dictdatas: entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id).all() #entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id,startpage=40,pos_on_page=9).all() #entries = [] startletters = set() for e in entries: heads = annotate_head(e) if not e.is_subentry: for h in heads: if len(h) > 0: startletters.add(h[0].lower()) annotate_pos(e) annotate_translations(e) annotate_examples(e) annotate_crossrefs(e) dictdata.startletters = unicode(repr(sorted(list(startletters)))) Session.commit() if __name__ == "__main__": main(sys.argv) ```
[ { "content": "Repeat the full code snippet:\n```python\nimport os, serial, time, numpy\n\nwatchTime = 3600\nmeasureInterval = 5\ncalcTime = 1.5\n\ndef receiving(ser):\n global last_received\n buffer_string = ''\n while True:\n buffer_string = buffer_string + ser.read(ser.inWaiting())\n if...
[ { "content": "Repeat the full code snippet:\n<|memory_start|>```python\nimport os, serial, time, numpy\n\nwatchTime = 3600\nmeasureInterval = 5\ncalcTime = 1.5\n\ndef receiving(ser):\n global last_received\n buffer_string = ''\n while True:\n buffer_string = buffer_string + ser.read(ser.inWaitin...
```python import os, serial, time, numpy watchTime = 3600 measureInterval = 5 calcTime = 1.5 def receiving(ser): global last_received buffer_string = '' while True: buffer_string = buffer_string + ser.read(ser.inWaiting()) if '\n' in buffer_string: lines = buffer_string.split('\n') # Guaranteed to have at least 2 entries last_received = lines[-2] buffer_string = lines[-1] return last_received def dateTime(): return time.strftime("%Y/%m/%d %H:%M:%S") ser = serial.Serial('/dev/ttyACM0', 9600) # Don't write immediately, the Arduino is restarting time.sleep(3) timePassed = 0 values = () startDate = "none" while 1: if timePassed >= watchTime: f = open(os.path.expanduser('~') + "/waterlog.txt", 'a') f.write(startDate + " - " + dateTime() + " " + str(numpy.mean(values)) + "\n") f.close() print dateTime() + " Wrote to file successfully" timePassed = 0 values = () startDate = "none" if "none" in startDate: startDate = dateTime() ser.write('4') message = receiving(ser) value = int(message[-4:]) values += (value,) timePassed += measureInterval time.sleep(measureInterval - calcTime) ```
[ { "content": "Repeat the following code:\n```python\n# Author: Trevor Perrin\n# See the LICENSE file for legal information regarding use of this file.\n\n\"\"\"TLS Lite + poplib.\"\"\"\n\nimport socket\nfrom poplib import POP3, POP3_SSL_PORT\nfrom tlslite.tlsconnection import TLSConnection\nfrom tlslite.integra...
[ { "content": "Repeat the following code:\n<|memory_start|>```python\n# Author: Trevor Perrin\n# See the LICENSE file for legal information regarding use of this file.\n\n\"\"\"TLS Lite + poplib.\"\"\"\n\nimport socket\nfrom poplib import POP3, POP3_SSL_PORT\nfrom tlslite.tlsconnection import TLSConnection\nfrom...
```python # Author: Trevor Perrin # See the LICENSE file for legal information regarding use of this file. """TLS Lite + poplib.""" import socket from poplib import POP3, POP3_SSL_PORT from tlslite.tlsconnection import TLSConnection from tlslite.integration.clienthelper import ClientHelper class POP3_TLS(POP3, ClientHelper): """This class extends L{poplib.POP3} with TLS support.""" def __init__(self, host, port = POP3_SSL_PORT, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, username=None, password=None, certChain=None, privateKey=None, x509Fingerprint=None, tackID=None, hardTack=None, settings=None): """Create a new POP3_TLS. For client authentication, use one of these argument combinations: - username, password (SRP) - certChain, privateKey (certificate) For server authentication, you can either rely on the implicit mutual authentication performed by SRP or you can do certificate-based server authentication with one of these argument combinations: - x509Fingerprint Certificate-based server authentication is compatible with SRP or certificate-based client authentication. The caller should be prepared to handle TLS-specific exceptions. See the client handshake functions in L{tlslite.TLSConnection.TLSConnection} for details on which exceptions might be raised. @type host: str @param host: Server to connect to. @type port: int @param port: Port to connect to. @type username: str @param username: SRP username. @type password: str @param password: SRP password for mutual authentication. Requires the 'username' argument. @type certChain: L{tlslite.x509certchain.X509CertChain} @param certChain: Certificate chain for client authentication. Requires the 'privateKey' argument. Excludes the SRP argument. @type privateKey: L{tlslite.utils.rsakey.RSAKey} @param privateKey: Private key for client authentication. Requires the 'certChain' argument. Excludes the SRP argument. @type x509Fingerprint: str @param x509Fingerprint: Hex-encoded X.509 fingerprint for server authentication. 
@type tackID: str @param tackID: TACK ID for server authentication. @type hardTack: bool @param hardTack: Whether to raise TackBreakSigError on TACK Break. @type settings: L{tlslite.handshakesettings.HandshakeSettings} @param settings: Various settings which can be used to control the ciphersuites, certificate types, and SSL/TLS versions offered by the client. """ self.host = host self.port = port sock = socket.create_connection((host, port), timeout) ClientHelper.__init__(self, username, password, certChain, privateKey, x509Fingerprint, tackID, hardTack, settings) connection = TLSConnection(sock) ClientHelper._handshake(self, connection) self.sock = connection self.file = self.sock.makefile('rb') self._debugging = 0 self.welcome = self._getresp() ```
[ { "content": "Write the code verbatim:\n```python\n#Copyright 2015 B. Johan G. Svensson\r\n#Licensed under the terms of the MIT license (see LICENSE).\r\n\r\nfrom __future__ import division\r\nimport string, struct, collections\r\nimport fsslib, floppytrack\r\n\r\n\r\n\r\n#import floppyinforead\r\n#dsk = floppy...
[ { "content": "Write the code verbatim:\n<|memory_start|>```python\n#Copyright 2015 B. Johan G. Svensson\r\n#Licensed under the terms of the MIT license (see LICENSE).\r\n\r\nfrom __future__ import division\r\nimport string, struct, collections\r\nimport fsslib, floppytrack\r\n\r\n\r\n\r\n#import floppyinforead\...
```python #Copyright 2015 B. Johan G. Svensson #Licensed under the terms of the MIT license (see LICENSE). from __future__ import division import string, struct, collections import fsslib, floppytrack #import floppyinforead #dsk = floppyinforead.analyzedisc('j:\\Transfer\\CloudKingdom.fss') ##-Take a coffee break... #print(floppyinforead.printtable(dsk, 5)) #revolution #image = floppyinforead.extractimage(dsk,80,2,10) #tracks, sides, sectors, (size=512) #fid = open('CloudKingdom.data','wb') #fid.write(image) #fid.close() #Create a string type that doesn't fill the screen with data: class Silentstring(str): __repr__ = object.__repr__ class Silentlist(list): __repr__ = object.__repr__ Track = collections.namedtuple('Track', ['trtimedata', 'trsynced', 'trmfmdec', 'sectors']) Sector = collections.namedtuple('Sector', ['metadata', 'data']) def sorthelper(a): try: return a[0] except TypeError: return a def analyzedisc(fname): fid = open(fname, 'rb') floppy = fsslib.Reader(fid) fid.close() retlst = Silentlist() for t in xrange(164): side = t%2 track = t//2 print "------Track:", track, "Side:", side retlst.append(analysetrack((side,track), *floppy.gettrack(side, track))) return retlst def analysetrack(sidetrck,timedata, indpuls): print indpuls bitsout, indexes = floppytrack.syncronize(timedata, indpuls) dataout, markers, indxp = floppytrack.mfmdecode(bitsout, indexes) markerpairs, unknownmarkers = floppytrack.lookupaddressmarkers(dataout, markers) mrkrspairs = floppytrack.revolutionseparate(markerpairs, indxp) unknownmrkrs = floppytrack.revolutionseparate(unknownmarkers, indxp) trtimedata = {'timedata' : timedata, 'indpuls' : indpuls} trsynced = {'rawbits' : bitsout, 'indpuls' : indexes} trmfmdec = {'data' : dataout, 'markers' : markers, 'indpuls' : indxp} sectors = Silentlist() for rev in zip(mrkrspairs, unknownmrkrs): #print "---INDEX---" sectors.append(analyserevol(sidetrck, dataout, *rev)) track = Track(trtimedata=trtimedata, trsynced=trsynced, trmfmdec=trmfmdec, 
sectors=sectors) return track def analyserevol(sidetrck, data, mrkrspairs, unknownmrkrs): markers = mrkrspairs + unknownmrkrs sortmark = sorted(markers,key=sorthelper) retlst = Silentlist() for mark in sortmark: try: if len(mark) == 2: retlst.append(analysesector(sidetrck, mark, data)) else: print "Unkown type of marker-tuple received!", mark , sidetrck except TypeError: retlst.append(analysedudmark(mark, data)) return retlst def analysesector(sidetrck, mark, data): #If it's a pair the id-field should always exits - but may be faulty sectid = floppytrack.addressdecode(mark[0],data) data, datainfo = floppytrack.dataextract(mark[1],sectid['size'],data) sector = Sector(metadata=dict(sectid.items() + datainfo.items()), data=Silentstring(data)) return sector def analysedudmark(mark, data): print mark print "dudmark", hex(ord(data[mark])) themark = data[mark] if struct.unpack('B',themark)[0] == 0xFE: # If id-field is truncated: sectid = dict() sectid = floppytrack.addressdecode(mark,data) else: sectid = dict() sector = Sector(metadata=sectid, data=themark) return sector def getinfo(sector,tracknum,sidenum): probstr = "" try: if sector.metadata['side'] != sidenum or sector.metadata['track'] != tracknum: probstr = "*" elif len(sector.data) == 1: #This is id-field with no data (data contains only 0xFE) probstr = "?" elif len(sector.data) != sector.metadata['size']: probstr = "T" elif not sector.metadata['datacrcok']: probstr = "!" # CRC error elif sector.metadata['sizecode'] != 2: probstr = "#" # Unusual sector size except KeyError: #Dudmark if len(sector.data) != 1: raise ValueError('dudmarks should always have data of length 1') #Note: This is not a sector number as above - this is hexdata. 
return "D" + "{0:02x}".format(ord(sector.data)).upper() return probstr + str(sector.metadata['sectnum']) def printtable(disc,rev): layoutstr='Detection:' + ' '*16 for num in range(1,33): layoutstr+=string.rjust(str(num),4,' ') for track in xrange(len(disc)): tracknum = track//2 sidenum = track%2 layoutstr+='\n' layoutstr+='Layout Track: ' layoutstr+=string.rjust(str(tracknum),2,'0') layoutstr+=' Side: ' + str(sidenum) + ': ' if rev < len(disc[track].sectors): for sectnum in xrange(len(disc[track].sectors[rev])): layoutstr+=string.rjust(getinfo(disc[track].sectors[rev][sectnum],tracknum,sidenum),4,' ') layoutstr+='\n' layoutstr+='\n\n' layoutstr+='#=Non 512 bytes sectors, !=CRC-Error, ?=Unknown/faulty mark' layoutstr+='\n' layoutstr+='*=Track/side numbering is wrong, TI=Truncated ID, TD=Truncated Data' layoutstr+='\n' layoutstr+='Only one is shown - increasing priority # -> ! -> * -> ?/TD/TI.' layoutstr+='\n' return layoutstr def extractsector(sector, ntrack, nsides, sectorsize): data = sector.data tracknum = ntrack//nsides sidenum = ntrack%nsides bad = False try: if sector.metadata['track'] != tracknum: bad = True if sector.metadata['side'] != sidenum: bad = True if not sector.metadata['idcrcok']: bad = True if not sector.metadata['datacrcok']: bad = True if sector.metadata['size'] != sectorsize: bad = True if sector.metadata['size'] > sectorsiz: data = data[:sectorsiz] else: data += '\0'*(sectorsiz - len(data)) except KeyError: return 0, True, '' return sector.metadata['sectnum'], bad, data def extractimage(discstruct,ntracks,nsides,nsectors,sectorsize=512): disc = discstruct.sectors ntracksides = ntracks * nsides validsectors = set(range(1,nsectors + 1)) imagelst = [] badlst = [] missinglst = [] for ntrack in xrange(ntracksides): imagelst.append([]) badlst.append([]) missinglst.append([]) for nsector in xrange(nsectors): imagelst[ntrack].append(['\0']*sectorsize) badlst[ntrack].append(True) missinglst[ntrack].append(True) for ntrack, track in enumerate(disc): for 
nrev, rev in enumerate(track.sectors): for nsector, sector in enumerate(rev): sectnum, bad, data = extractsector(sector, ntrack, nsides, sectorsize) if not sectnum in validsectors: continue if bad: print 'Bad sector in in-image:', print 'Track:', ntrack//nsides, print ' Side:', ntrack%nsides, print ' Num:', sectnum, print ' Rev:', nrev if badlst[ntrack][sectnum - 1]: imagelst[ntrack][sectnum - 1] = disc[ntrack].sectors[nrev][nsector].data if type(imagelst[ntrack][sectnum - 1]).__name__ != 'Silentstring': print "ntrack, sectnum", ntrack, sectnum print type(imagelst[ntrack][sectnum - 1]).__name__ return imagelst[ntrack][sectnum - 1] badlst[ntrack][sectnum - 1] = bad missinglst[ntrack][sectnum - 1] = False #imagelst[ntrack*nsides + nside][ntrack] image = Silentstring() badsectors = [] missingsectors = [] for ntrack in xrange(ntracksides): for nsector in xrange(nsectors): try: image += imagelst[ntrack][nsector] except: print "ntrack, nsector", ntrack, nsector return imagelst[ntrack][nsector] if missinglst[ntrack][sectnum]: print 'Missing sector in out-image:', print 'Track:', ntrack//nsides, print ' Side:', ntrack%nsides, print ' Num:', nsector+1 elif badlst[ntrack][nsector]: print 'Bad sector in out-image:', print 'Track:', ntrack//nsides, print ' Side:', ntrack%nsides, print ' Num:', nsector+1 return image def savetascsv(disc,track, fname): floppytrack.savetascsv(disc[track].trtimedata['timedata'], fname) ```
[ { "content": "Provide a verbatim copy of the code:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport numpy\nfrom util import sigmoid\n\nfrom abc import ABCMeta, abstractmethod\nfrom graph import Bipartite\nfrom collections import deque\n\nclass MCMethod(...
[ { "content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport numpy\nfrom util import sigmoid\n\nfrom abc import ABCMeta, abstractmethod\nfrom graph import Bipartite\nfrom collections import deque\n\...
```python #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import numpy from util import sigmoid from abc import ABCMeta, abstractmethod from graph import Bipartite from collections import deque class MCMethod(object): """Interface for Monte Carlo-like methods.""" __metaclass__ = ABCMeta _network = None _evnMask = None _oddMask = None #_bipartite = None def __init__(self, network): self._network = network bipartite = Bipartite(network.weights) if not bipartite.isBipartite: raise NotImplementedError("Network must be bipartite") self._evnMask = bipartite.mask(True) self._oddMask = bipartite.mask(False) def update(self): self._updateOnMask(self._evnMask) self._updateOnMask(self._oddMask) @abstractmethod def _updateOnMask(self, mask): raise NotImplementedError("Please Implement this method") class BinaryThreshold(MCMethod): """Finds the local minima.""" def _updateOnMask(self, mask): self._network.units[mask] = self._network.gaps[mask] < 0 class SigmoidUpdate(MCMethod): """Used in RBMs.""" def _updateOnMask(self, mask): self._network.units[mask] = sigmoid(self._network.gaps[mask]) > numpy.random.random(len(mask)) class MetropolisAlgorithm(MCMethod): """Metropolis-Hastings algorithm.""" def _updateOnMask(self, mask): # Energy change due to flipping selected units. dE = self._network.gaps[mask] * ( 1. - 2. 
* self._network.units[mask] ) # Update rule for Metrolopis-Hastings algorithm select = numpy.minimum(1, numpy.exp(-dE)) > numpy.random.random(len(dE)) # XOR will flip the units where select == True self._network.units[mask] = numpy.logical_xor(select, self._network.units[mask]) class BinaryGibbsStep(MCMethod): def _updateOnMask(self, mask): sig = 1./(1 + numpy.exp(-self._network.gaps[mask])) select = sig > 0.5 self._network.units[mask] = numpy.logical_xor(select, self._network.units[mask]) class WolffClusterAlgorithm(MCMethod): def __init__(self, network): super(WolffClusterAlgorithm, self).__init__(network) #raise NotImplementedError() # BFS is not easily parallel # Use union-find algorithms somehow? # Maybe this: # 1) union EVERY neighbor spin together with probability p in parallel # 2) select random site, create mask using find algorithm in parallel # 3) flip sites in parallel # Worst case when clusters are small (high-T) def update(self): # This is a terrible way to find J J = abs(self._network.weights.matrix.min()) p = 1 - numpy.exp(2 * J) boundary = deque() marked = self._network.size * [False] site = numpy.random.randint(0, self._network.size - 1) boundary.append(site) while len(boundary) > 0: site = boundary.popleft() marked[site] = True for neighbor in self._network.weights.adj(site): if self._network.units[neighbor] == self._network.units[site] and \ not marked[neighbor] and numpy.random.random() < p: boundary.append(neighbor) mask = numpy.where(marked) self._network.units[mask] = numpy.logical_not(self._network.units[mask]) def _updateOnMask(self, mask): raise NotImplementedError() ```
[ { "content": "```python\n# -*- coding: utf-8 -*-\n\n# Scrapy settings for curation_spider project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.ht...
[ { "content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Scrapy settings for curation_spider project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/to...
```python # -*- coding: utf-8 -*- # Scrapy settings for curation_spider project # # For simplicity, this file contains only settings considered important or # commonly used. You can find more settings consulting the documentation: # # http://doc.scrapy.org/en/latest/topics/settings.html # http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html # http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html BOT_NAME = 'curation_spider' SPIDER_MODULES = ['curation_spider.spiders'] NEWSPIDER_MODULE = 'curation_spider.spiders' DUPEFILTER_DEBUG = True # Crawl responsibly by identifying yourself (and your website) on the user-agent #USER_AGENT = 'curation_spider (+http://www.yourdomain.com)' # Configure maximum concurrent requests performed by Scrapy (default: 16) #CONCURRENT_REQUESTS=32 # Configure a delay for requests for the same website (default: 0) # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay # See also autothrottle settings and docs DOWNLOAD_DELAY=1 # The download delay setting will honor only one of: #CONCURRENT_REQUESTS_PER_DOMAIN=16 #CONCURRENT_REQUESTS_PER_IP=16 # Disable cookies (enabled by default) #COOKIES_ENABLED=False # Disable Telnet Console (enabled by default) #TELNETCONSOLE_ENABLED=False # Override the default request headers: #DEFAULT_REQUEST_HEADERS = { # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', # 'Accept-Language': 'en', #} # Enable or disable spider middlewares # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html #SPIDER_MIDDLEWARES = { # 'curation_spider.middlewares.MyCustomSpiderMiddleware': 543, #} # Enable or disable downloader middlewares # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html #DOWNLOADER_MIDDLEWARES = { # 'curation_spider.middlewares.MyCustomDownloaderMiddleware': 543, #} # Enable or disable extensions # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html #EXTENSIONS 
= { # 'scrapy.telnet.TelnetConsole': None, #} # Configure item pipelines # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html #ITEM_PIPELINES = { # 'curation_spider.pipelines.SomePipeline': 300, #} # Enable and configure the AutoThrottle extension (disabled by default) # See http://doc.scrapy.org/en/latest/topics/autothrottle.html # NOTE: AutoThrottle will honour the standard settings for concurrency and delay #AUTOTHROTTLE_ENABLED=True # The initial download delay #AUTOTHROTTLE_START_DELAY=5 # The maximum download delay to be set in case of high latencies #AUTOTHROTTLE_MAX_DELAY=60 # Enable showing throttling stats for every response received: #AUTOTHROTTLE_DEBUG=False # Enable and configure HTTP caching (disabled by default) # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings #HTTPCACHE_ENABLED=True #HTTPCACHE_EXPIRATION_SECS=0 #HTTPCACHE_DIR='httpcache' #HTTPCACHE_IGNORE_HTTP_CODES=[] #HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage' ```
[ { "content": "Here is a code snippet:\n```python\n__author__ = 'olav'\n\nimport sys, os\nimport theano.tensor as T\nimport theano\nimport numpy as np\n\nsys.path.append(os.path.abspath(\"./\"))\nfrom wrapper import create_output_func\nfrom model import ConvModel\n\n\ndef create_threshold_image(image, threshold)...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\n__author__ = 'olav'\n\nimport sys, os\nimport theano.tensor as T\nimport theano\nimport numpy as np\n\nsys.path.append(os.path.abspath(\"./\"))\nfrom wrapper import create_output_func\nfrom model import ConvModel\n\n\ndef create_threshold_image(i...
```python __author__ = 'olav' import sys, os import theano.tensor as T import theano import numpy as np sys.path.append(os.path.abspath("./")) from wrapper import create_output_func from model import ConvModel def create_threshold_image(image, threshold): binary_arr = np.ones(image.shape) low_values_indices = image <= threshold # Where values are low binary_arr[low_values_indices] = 0 # All low values set to 0 return binary_arr def resize(image, size): return image.resize( [int(size * s) for s in image.size] ) def create_predictor(dataset, model_config, model_params, batch_size): x = T.matrix('x') y = T.imatrix('y') drop = T.iscalar('drop') index = T.lscalar() model = ConvModel(model_config, verbose=True) model.build(x, drop, batch_size, init_params=model_params) return create_output_func(dataset, x, y, drop, [index], model.get_output_layer(), batch_size) def create_simple_predictor(model_config, model_params): #TODO: Does this single predictor even work? data = T.matrix('data') x = T.matrix('x') drop = T.iscalar('drop') batch_size = 1 model = ConvModel(model_config, verbose=True) model.build(x, drop, batch_size, init_params=model_params) return model.create_predict_function(x, drop, data) def batch_predict(predictor, dataset, dim, batch_size): examples = dataset[0].eval().shape[0] nr_of_batches = int(examples/ batch_size) result_output = np.empty((examples, dim*dim), dtype=theano.config.floatX) result_label = np.empty((examples, dim*dim), dtype=theano.config.floatX) for i in range(nr_of_batches): output, label = predictor(i) result_output[i*batch_size: (i+1)*batch_size] = output result_label[i*batch_size: (i+1)*batch_size] = label return result_output, result_label ```
[ { "content": "Here is the source code:\n```python\n#!/usr/bin/python3\n\nimport sys\nimport random\nimport math\nimport os\nimport getopt\nimport pygame\nimport shelve\nimport time\nfrom pygame.locals import *\n\nif not pygame.font:\n print('Warning, fonts disabled')\nif not pygame.mixer:\n print('Warning...
[ { "content": "Here is the source code:\n<|memory_start|>```python\n#!/usr/bin/python3\n\nimport sys\nimport random\nimport math\nimport os\nimport getopt\nimport pygame\nimport shelve\nimport time\nfrom pygame.locals import *\n\nif not pygame.font:\n print('Warning, fonts disabled')\nif not pygame.mixer:\n ...
```python #!/usr/bin/python3 import sys import random import math import os import getopt import pygame import shelve import time from pygame.locals import * if not pygame.font: print('Warning, fonts disabled') if not pygame.mixer: print('Warning, sound disabled') # setting up constants WINDOW_WIDTH = 640 WINDOW_HEIGHT = 480 BLACK = (0, 0, 0) WHITE = (255, 255, 255) RED = (255, 0, 0) GREEN = (0, 255, 0) BLUE = (0, 0, 255) DARK_GREEN = (0, 65, 0) GREY = (100, 100, 100) BACKGROUND_COLOR = DARK_GREEN COLLISION_RECT_COLOR = [n * 0.8 for n in BACKGROUND_COLOR] MAX_FPS = 60 ENEMY_SPAWNDELAY = 500 # divided by current level windowcolor = BLACK PLAYER_SPEED = .025 FRICTION = 0.00667 ENEMY_MIN_SPEED = 0.01 ENEMY_MAX_SPEED = 0.2 LEVEL_LENGTH = 6 * 1000 # in milliseconds # get fonts from /data/fonts* FONTFILES = [f for f in os.listdir(os.path.join("data", "fonts")) if f.endswith('.ttf')] FONTS = [] # None = default font for file in FONTFILES: FONTS.append(os.path.join("data", "fonts", file)) MENU_FONT = os.path.join("data", "fonts", "kenpixel.ttf") # used for main menu GAME_OVER_FONT = None # None = pygame default, used for game over screen # None = pygame default, used for fps/frametime/enemy number indicators in game GUI_FONT = None NUMBER_IMAGES = [] for i in range(10): image = pygame.image.load( os.path.join("data", "numbers", "%d.png" % i)) image.set_colorkey(BLACK) NUMBER_IMAGES.append(image) class Player(): """The player. 
Can move left/right and up/down.""" def __init__(self, controls='all'): self.image, self.rect = load_image('player.png') self.pos = WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2 screen = pygame.display.get_surface() self.area = screen.get_rect() self.speed = PLAYER_SPEED self.state = "still" self.moveright = 0 self.moveleft = 0 self.moveup = 0 self.movedown = 0 self.controls = controls self.reinit() def reinit(self): self.state = "still" self.movepos = [0, 0] def update(self, time_passed): # friction for i in range(time_passed): self.movepos[0] = self.movepos[0] * (1.0 - FRICTION) self.movepos[1] = self.movepos[1] * (1.0 - FRICTION) if abs(self.movepos[0]) < 0.1: self.movepos[0] = 0 if abs(self.movepos[1]) < 0.1: self.movepos[1] = 0 # apply player movement to velocity self.movepos[ 0] += (self.moveright - self.moveleft) * self.speed * time_passed self.movepos[1] += (self.movedown - self.moveup) * \ self.speed * time_passed # update x and y seperately to allow smooth movement along screen # edge # first, move x newpos = self.pos[0] + self.movepos[0], self.pos[1] newrect = Rect(newpos[0], newpos[1], self.rect.w, self.rect.h) # if new position is in screen, move if self.area.contains(newrect): self.rect = newrect self.pos = newpos # then, move y newpos = self.pos[0], self.pos[1] + self.movepos[1] newrect = Rect(newpos[0], newpos[1], self.rect.w, self.rect.h) # if new position is in screen, move if self.area.contains(newrect): self.rect = newrect self.pos = newpos class Enemy(pygame.sprite.Sprite): """An enemy: comes from the right, heads to the left. erratic: if True, the enemy's up/down velocity randomly changes. aimed: if True, the enemy starts out with a velocity towards the player. rotated: if True, the image is rotated 90, 180, or 270 degrees. 
""" def __init__(self, x, y, speed, game, image, erratic=False, aimed=False, rotated=False): pygame.sprite.Sprite.__init__(self) self.image = image self.rect = self.image.get_rect() if rotated: # rotate the image of the enemy in a random increment of 90 self.image = pygame.transform.rotate( self.image, random.choice([90, 180, 270])) # and get a new rect for it, too self.rect = self.image.get_rect() self.pos = x, y screen = pygame.display.get_surface() self.area = screen.get_rect() self.speed = speed self.game = game self.erratic = erratic self.aimed = aimed self.reinit() def reinit(self): self.state = "still" if not self.aimed: # enemies are by default moving left self.movepos = [-self.speed, 0] else: # pick random player to move towards player = self.game.players[ random.randint(0, len(self.game.players) - 1)] # calculate vector to player self.movepos = [ player.pos[0] - self.pos[0], player.pos[1] - self.pos[1]] # calculate current mag mag = math.sqrt(self.movepos[0] ** 2 + self.movepos[1] ** 2) # divide x/y movement by mag, changing angled movement to 1 self.movepos[0], self.movepos[1] = self.movepos[ 0] / mag, self.movepos[1] / mag # multiiply it by self.speed self.movepos[0], self.movepos[1] = self.speed * \ self.movepos[0], self.speed * self.movepos[1] def update(self, time_passed): if self.erratic: # moves erratically up and down self.movepos[ 1] += random.uniform(-ENEMY_MIN_SPEED, ENEMY_MIN_SPEED) newpos = self.pos[0] + self.movepos[0] * \ time_passed, self.pos[1] + self.movepos[1] * time_passed if newpos[0] + self.rect.w > -5: self.pos = newpos self.rect.x, self.rect.y = newpos else: self.game.enemies.remove(self) class TextEnemy(Enemy): def __init__(self, x, y, speed, game, text, **kwargs): image = render_number(text) super(TextEnemy, self).__init__(x, y, speed, game, image, **kwargs) class Dimmer: """Class for dimming the screen.""" def __init__(self, keepalive=0): self.keepalive = keepalive if self.keepalive: self.buffer = pygame.Surface( 
pygame.display.get_surface().get_size()) else: self.buffer = None def dim(self, darken_factor=64, color_filter=(0, 0, 0)): if not self.keepalive: self.buffer = pygame.Surface( pygame.display.get_surface().get_size()) self.buffer.blit(pygame.display.get_surface(), (0, 0)) if darken_factor > 0: darken = pygame.Surface(pygame.display.get_surface().get_size()) darken.fill(color_filter) darken.set_alpha(darken_factor) # safe old clipping rectangle... old_clip = pygame.display.get_surface().get_clip() # ..blit over entire screen... pygame.display.get_surface().blit(darken, (0, 0)) # pygame.display.flip() # ... and restore clipping pygame.display.get_surface().set_clip(old_clip) def undim(self): if self.buffer: pygame.display.get_surface().blit(self.buffer, (0, 0)) if not self.keepalive: self.buffer = None def render_number(text_number): font_width = 5 font_height = 7 int_digits = [int(digit) for digit in text_number] image_width = sum(NUMBER_IMAGES[d].get_width() for d in int_digits) + \ 2 * (len(text_number) - 1) image = pygame.Surface((image_width, font_height)) x = 0 for digit in text_number: int_digit = int(digit) digit_image = NUMBER_IMAGES[int_digit] image.blit(digit_image, ((x, 0), digit_image.get_size())) x += NUMBER_IMAGES[int_digit].get_width() + 2 image.set_colorkey(BLACK) scale = random.randint(3, 4) return pygame.transform.scale( image, (image.get_width() * scale, image.get_height() * scale)) def load_image(name, colorkey=None): fullname = os.path.join('data', name) try: image = pygame.image.load(fullname) except pygame.error as message: print(('Cannot load image:', name)) raise SystemExit(message) image = image.convert_alpha() if colorkey is not None: if colorkey is -1: colorkey = image.get_at((0, 0)) image.set_colorkey(colorkey, RLEACCEL) return image, image.get_rect() def get_random_font(): # returns a random font from the list FONTS return FONTS[random.randint(0, len(FONTS) - 1)] def get_frames_from_image(base_image, framenumber, framesize): # gets 
frames of an animation from an image frames = [] offsets = [] for n in range(framenumber): offsets.append(framesize[0] * n) for i in range(framenumber): # for each frame, turn it into a seperate image image = pygame.Surface(framesize) # image.blit(base_image, (0,0))#, (offsets[i], framesize)) image = base_image.subsurface( offsets[i], 0, framesize[0], framesize[1]) frames.append(image) return frames def load_sound(name): class NoneSound: def play(self): pass if not pygame.mixer: return NoneSound() fullname = os.path.join('data', name) try: sound = pygame.mixer.Sound(fullname) except pygame.error as message: print(('Cannot load sound:', wav)) raise SystemExit(message) return sound def terminate(): print('goodbye') pygame.quit() sys.exit() def save_highscores(highscores): file = shelve.open('data/highscores', 'n') file['highscores'] = highscores file.close() def load_highscores(): file = shelve.open('data/highscores', 'r') highscores = file['highscores'] file.close() return highscores def playertouchingenemy(playerrect, enemies): for enemy in enemies: if playerrect.colliderect(enemy.rect): return True return False def draw_text(text, font, surface, x, y, color=WHITE, background=None, position="topleft"): # draws some text using font to the surface textobj = font.render(text, 1, color) textrect = textobj.get_rect() if position == 'center': textrect.center = (x, y) elif position == 'bottomright': textrect.bottomright = (x, y) elif position == 'topleft': textrect.topleft = (x, y) elif position == 'topright': textrect.topright = (x, y) if background: pygame.draw.rect(screen, background, textrect.inflate(2, 2)) surface.blit(textobj, textrect) return textrect.inflate(2, 2) # for knowing where to redraw the background class TextSprite(pygame.sprite.Sprite): """For use in menus.""" def __init__(self, text, font, x, y, color=WHITE): self.text = text self.font = font self.color = color self.image = font.render(text, 1, color) self.rect = self.image.get_rect() self.rect.center 
= (x, y) def draw(self, screen): return screen.blit(self.image, self.rect) def change_color(self, color): self.image = self.font.render(self.text, 1, color) self.color = color class Game(object): base_enemy_spawn_delay = 500 # divided by current level base_level_length = 6000 # in milliseconds enemy_min_speed = 0.01 enemy_max_speed = 0.2 background_color = DARK_GREEN show_hitboxes = False show_debug_info = False hotseat_multiplayer = False # if controls == '', player is not playing types_of_controls = ['wasd', 'arrows', 'tfgh', 'ijkl', 'numpad', ''] # default controls for each player players_controls = ['wasd', 'arrows', 'tfgh', 'ijkl'] def __init__(self, screen): self.screen = screen self.clock = pygame.time.Clock() self.background = pygame.Surface(screen.get_size()).convert() self.background.fill(BACKGROUND_COLOR) # load highscores from data/highscores try: self.highscores = load_highscores() except: # get new highscores if it cannot load highscores self.highscores = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] self.init_game() def init_game(self): self.players = [] self.enemies = [] self.level = 1 # old textrects: used for filling background color self.old_textrects = [] def menu(self, title, options, title_size=50, option_size=25, enemies_background=True, option_selected=0): """ A basic menu. Arrow keys or the mouse are used to navigate. 
""" x = WINDOW_WIDTH / 2 titlefont = pygame.font.Font(MENU_FONT, title_size) title_y = title_size / 2 + 30 title = TextSprite(title, titlefont, x, title_y, RED) optioncolor = WHITE selectedoptioncolor = RED optionfont = pygame.font.Font(MENU_FONT, option_size) space_below_title = title_size space_between_options = optionfont.get_height() option_sprites = [] for i in range(len(options)): y = space_below_title + title_y \ + (i + 1) * space_between_options if option_selected == i: color = selectedoptioncolor else: color = optioncolor option_sprites.append( TextSprite(options[i], optionfont, x, y, color)) spawntimer = pygame.time.Clock() spawntime = 0 screen_dimmer = Dimmer() def update_option_sprites(option_sprites, old_option, new_option): option_sprites[old_option].change_color(optioncolor) option_sprites[new_option].change_color(selectedoptioncolor) while 1: time_since_last_frame = self.clock.tick(MAX_FPS) # clear screen with backgroundcolor screen_dimmer.undim() self.screen.blit(self.background, (0, 0)) if enemies_background: # draw background fanciness # scrolling enemies spawntime += spawntimer.tick() if spawntime >= ENEMY_SPAWNDELAY: spawntime -= ENEMY_SPAWNDELAY x = WINDOW_WIDTH - 10 y = random.randint(0, WINDOW_HEIGHT) speed = random.uniform(ENEMY_MIN_SPEED, ENEMY_MAX_SPEED) text = random.choice([str(random.randint(1, 1024))]) self.enemies.append( TextEnemy(x, y, speed, self, text)) for object in self.enemies[:]: object.update(time_since_last_frame) self.screen.blit(object.image, object.rect) # then, darken the screen without the title/options screen_dimmer.dim(darken_factor=200) title.draw(self.screen) for option in option_sprites: option.draw(self.screen) # update display pygame.display.update() # handle keys for menu for event in pygame.event.get(): if event.type == QUIT: self.exit() if event.type == KEYDOWN: if event.key == K_UP or event.key == ord('w'): old_option = option_selected option_selected -= 1 if option_selected < 0: option_selected = 
len(options) - 1 update_option_sprites( option_sprites, old_option, option_selected) elif event.key == K_DOWN or event.key == ord('s'): old_option = option_selected option_selected += 1 if option_selected > len(options) - 1: option_selected = 0 update_option_sprites( option_sprites, old_option, option_selected) elif event.key == K_ESCAPE: # pressing escape quits return "exit" elif event.key == K_RETURN: return option_selected elif event.type == MOUSEMOTION: for option in option_sprites: if option.rect.collidepoint(event.pos): old_option = option_selected option_selected = option_sprites.index(option) update_option_sprites( option_sprites, old_option, option_selected) break elif event.type == MOUSEBUTTONDOWN: return option_selected def main_menu(self): while 1: choice = self.menu("THE RNG", ["Play", "Options", "Exit"], title_size=100, option_size=50) if choice == 0: while 1: self.run() elif choice == 1: self.options_menu() elif choice == 2: # 'exit' self.exit() # if player presses ESC or tries to exit window elif choice == 'exit': self.exit() def exit(self): save_highscores(self.highscores) terminate() def options_menu(self): option_selected = 0 while 1: if self.hotseat_multiplayer: # if hotseat, show controls for all players options = [ "Show hitboxes " + str(self.show_hitboxes), "Hotseat multiplayer " + str(self.hotseat_multiplayer), "Player 1 controls = " + self.players_controls[0], "Player 2 controls = " + self.players_controls[1], "Player 3 controls = " + self.players_controls[2], "Player 4 controls = " + self.players_controls[3], "Back" ] # indicate if any player is not playing for i in range(len(options[2:6])): if self.players_controls[i] == '': options[i + 2] = options[i + 2][:9] + "Not Playing" else: options = [ "Show hitboxes " + str(self.show_hitboxes), "Hotseat multiplayer " + str(self.hotseat_multiplayer), "Back" ] choice = self.menu("Options", options, option_selected=option_selected) if choice == 0: # toggle showing hitboxes self.show_hitboxes = not 
self.show_hitboxes if choice == 1: # toggle hotseat multiplayer self.hotseat_multiplayer = not self.hotseat_multiplayer elif not self.hotseat_multiplayer and choice == 2: # exit to main menu break elif 2 <= choice <= 5: player_index = choice - 2 control_type = self.players_controls[player_index] self.players_controls[player_index] = \ self.get_next_control_type(control_type) elif choice == 6 or choice == 'exit': break option_selected = choice def get_next_control_type(control_type): i = self.types_of_controls.index(control_type) - 1 return self.types_of_controls[i] def spawn_number_enemies(self): x = WINDOW_WIDTH - 10 y = random.randint(0, WINDOW_HEIGHT) speed = random.uniform(ENEMY_MIN_SPEED, ENEMY_MAX_SPEED) text = random.choice([str(random.randint(1, 1024))]) if self.level >= 4: # 1/10 chance of erratic movement from level 4 onward erratic_movement = (1 == random.randint(1, 10)) else: erratic_movement = False if self.level >= 2: # 1/10 chance of aimed movement from level 2 onward aimed = (1 == random.randint(1, 10)) else: aimed = False if self.level >= 2: # 1/4 chance of starting rotated from level 2 onward start_rotated = (1 == random.randint(1, 4)) else: start_rotated = False self.enemies.append(TextEnemy( x, y, speed, self, text, erratic=erratic_movement, aimed=aimed, rotated=start_rotated)) # spawn enemies on left to encourage player to run # and to look cool x = 10 y = random.randint(0, WINDOW_HEIGHT) # fast as the average speed of an enemy speed = (ENEMY_MAX_SPEED + ENEMY_MIN_SPEED) / 2 if self.level >= 3: # after level 3, half of the left enemies move erratically # this makes them look cooler and more terrifying erratic_movement = (1 == random.randint(1, 2)) else: erratic_movement = False self.enemies.append(TextEnemy( x, y, speed, self, text, erratic=erratic_movement)) def handle_keys(self): for event in pygame.event.get(): if event.type == QUIT: self.exit() if event.type == KEYDOWN: for player in self.players: if player.controls == 'all' or 
player.controls == 'wasd': if event.key == ord('a'): player.moveleft = 1 if event.key == ord('d'): player.moveright = 1 if event.key == ord('w'): player.moveup = 1 if event.key == ord('s'): player.movedown = 1 if player.controls == 'all' or player.controls == 'arrows': if event.key == K_LEFT: player.moveleft = 1 if event.key == K_RIGHT: player.moveright = 1 if event.key == K_UP: player.moveup = 1 if event.key == K_DOWN: player.movedown = 1 if player.controls == 'all' or player.controls == 'tfgh': if event.key == ord('f'): player.moveleft = 1 if event.key == ord('h'): player.moveright = 1 if event.key == ord('t'): player.moveup = 1 if event.key == ord('g'): player.movedown = 1 if player.controls == 'all' or player.controls == 'ijkl': if event.key == ord('j'): player.moveleft = 1 if event.key == ord('l'): player.moveright = 1 if event.key == ord('i'): player.moveup = 1 if event.key == ord('k'): player.movedown = 1 if player.controls == 'all' or player.controls == 'numpad': if event.key == K_KP4: player.moveleft = 1 if event.key == K_KP6: player.moveright = 1 if event.key == K_KP8: player.moveup = 1 if event.key == K_KP2: player.movedown = 1 if event.key == K_F3: # toggle showing debug info self.show_debug_info = not(self.show_debug_info) if event.key == K_F4: # toggle drawing hitboxes of enemies self.show_hitboxes = not(self.show_hitboxes) if event.type == KEYUP: if event.key == K_ESCAPE: return 'exit' for player in self.players: if player.controls == 'all' or player.controls == 'arrows': if event.key == K_LEFT: player.moveleft = 0 if event.key == K_RIGHT: player.moveright = 0 if event.key == K_UP: player.moveup = 0 if event.key == K_DOWN: player.movedown = 0 if player.controls == 'all' or player.controls == 'wasd': if event.key == ord('a'): player.moveleft = 0 if event.key == ord('d'): player.moveright = 0 if event.key == ord('w'): player.moveup = 0 if event.key == ord('s'): player.movedown = 0 if player.controls == 'all' or player.controls == 'tfgh': if event.key 
== ord('f'): player.moveleft = 0 if event.key == ord('h'): player.moveright = 0 if event.key == ord('t'): player.moveup = 0 if event.key == ord('g'): player.movedown = 0 if player.controls == 'all' or player.controls == 'ijkl': if event.key == ord('j'): player.moveleft = 0 if event.key == ord('l'): player.moveright = 0 if event.key == ord('i'): player.moveup = 0 if event.key == ord('k'): player.movedown = 0 if player.controls == 'all' or player.controls == 'numpad': if event.key == K_KP4: player.moveleft = 0 if event.key == K_KP6: player.moveright = 0 if event.key == K_KP8: player.moveup = 0 if event.key == K_KP2: player.movedown = 0 def handle_game_over(self): # first, save highscore # add score to highscores self.highscores.append(self.score) # sort highscores in descending order self.highscores.sort(reverse=True) # get rid of lowest highscore self.highscores.pop(-1) # dim screen screen_dimmer = Dimmer() screen_dimmer.dim(darken_factor=200) # draw gameover text, including score font = pygame.font.Font(GAME_OVER_FONT, 58) draw_text('GAME OVER', font, self.screen, (WINDOW_WIDTH / 2), 20, color=RED, position='center') # show highscores draw_text('Score:' + str(self.score), font, self.screen, (WINDOW_WIDTH / 2), 110, color=WHITE, position='center') # render highscores in a smaller font font = pygame.font.Font(GAME_OVER_FONT, 36) draw_text('HIGHSCORES', font, self.screen, WINDOW_WIDTH / 2, 150, color=WHITE, position='center') for i in range(len(self.highscores)): x = WINDOW_WIDTH / 2 y = 180 + 30 * i draw_text( str(self.highscores[i]), font, self.screen, x, y, color=WHITE, position='center') if self.highscores[i] == self.score: draw_text("YOU ->" + " " * len(str(self.highscores[i])), font, self.screen, x - 20, y + 10, color=WHITE, position='bottomright') pygame.display.update() # wait 1 second to stop people accidentally skipping this screen time.sleep(1) font = pygame.font.Font(GAME_OVER_FONT, 58) draw_text('Press Enter to play again.', font, self.screen, 
WINDOW_WIDTH / 2, 60, color=WHITE, position='center') pygame.display.update() self.wait_for_keypress(certainkey=K_RETURN) screen_dimmer.undim() self.game_over = True self.init_game() def wait_for_keypress(self, certainkey=None): # wait until the player presses a key # clears the pygame events, ensuring it isn't going to register an old # keypress pygame.event.clear() while True: # 5 frames a second; nothing's moving, so it should be ok: the # player won't notice self.clock.tick(5) for event in pygame.event.get(): # if player tries to close the window, terminate everything if event.type == QUIT: self.exit() if event.type == KEYDOWN: if event.key == K_ESCAPE: # pressing escape quits self.main_menu() elif certainkey is None: return # all other keys just return elif event.key == certainkey: return def run(self): self.init_game() # Blit everything to the screen self.screen.blit(self.background, (0, 0)) pygame.display.update() self.score = 0 self.spawntime = 0 self.clock.tick() self.time_until_new_level = LEVEL_LENGTH # sleep 1 millisecond at game start to prevent error when trying to # divide by time_since_last_frame when it is zero time.sleep(0.001) if self.hotseat_multiplayer: self.players = [Player(controls) for controls in self.players_controls if controls != ''] else: self.players = [Player()] self.game_over = False while not self.game_over: # Make sure game doesn't run at more than MAX_FPS frames per second self.time_since_last_frame = self.clock.tick(MAX_FPS) event = self.handle_keys() if event == "exit": # exit to main menu self.main_menu() # check if player has hit an enemy using smaller hitbox for player in self.players: player_rect = player.rect.inflate(-14, -14) if playertouchingenemy(player_rect, self.enemies): # first, clear the player's sprite with background self.screen.blit(self.background, player.rect, player.rect) self.players.remove(player) # check if all players are dead or not # check seperate from death check to stop starting with no # players if 
len(self.players) == 0: # show game over screen self.handle_game_over() # break the loop self.game_over = True # new level if time self.time_until_new_level -= self.time_since_last_frame if self.time_until_new_level <= 0: self.level += 1 self.time_until_new_level = LEVEL_LENGTH # spawn 'new level' enemy x = WINDOW_WIDTH - 10 y = random.randint(50, WINDOW_HEIGHT - 50) speed = ENEMY_MAX_SPEED text = "LEVEL " + str(self.level) # new level enemy uses pygame default font, due to munro having # bad hitbox at large sizes enemyfont = pygame.font.Font(None, 50) self.enemies.append(Enemy( x, y, speed, self, enemyfont.render(text, True, RED))) # spawn enemies self.spawntime += self.time_since_last_frame # spawn enemies on right if SPAWN_DELAY time has passed if self.spawntime >= ENEMY_SPAWNDELAY / math.sqrt(self.level): self.spawntime -= ENEMY_SPAWNDELAY / math.sqrt(self.level) self.score += 1 self.spawn_number_enemies() # RENDER EVERYTHING for player in self.players: self.screen.blit(self.background, player.rect, player.rect) for enemy in self.enemies: self.screen.blit(self.background, enemy.rect, enemy.rect) for player in self.players: player.update(self.time_since_last_frame) for rect in self.old_textrects: self.screen.blit(self.background, rect, rect) self.old_textrects = [] # draw score at top-middle of screen font = pygame.font.Font(GUI_FONT, 20) self.old_textrects.append( draw_text('Score:' + str(self.score), font, self.screen, WINDOW_WIDTH / 2, 20, color=RED, position='center') ) if self.show_debug_info: # show all debug info if enabled # draw FPS at topright screen fps = 1.0 / self.time_since_last_frame * 1000 self.old_textrects.append( draw_text( 'FPS:' + str(int(fps)) + '/' + str(MAX_FPS), font, self.screen, WINDOW_WIDTH - 100, 10, color=WHITE, background=BLACK, position='topleft') ) # draw frame time: time it takes to render each frame self.old_textrects.append( draw_text('FT: ' + str(self.time_since_last_frame), font, self.screen, WINDOW_WIDTH - 100, 25, 
color=WHITE, background=BLACK, position='topleft') ) # draw number of enemies on topright, for debug self.old_textrects.append( draw_text("Numbers:" + str(len(self.enemies)), font, self.screen, WINDOW_WIDTH - 100, 40, color=WHITE, background=BLACK, position="topleft") ) # draw enemies in enemies for enemy in self.enemies[:]: enemy.update(self.time_since_last_frame) if self.show_hitboxes: # draw slightly darker then background rectangle pygame.draw.rect( self.screen, COLLISION_RECT_COLOR, enemy.rect) for enemy in self.enemies[:]: self.screen.blit(enemy.image, enemy.rect) # draw player for player in self.players: self.screen.blit(player.image, player.rect) if self.show_hitboxes: # draw player rect pygame.draw.rect( self.screen, WHITE, player.rect.inflate(-14, -14)) # blit to screen pygame.display.update() pygame.event.pump() def main(): # Initialise screen and window pygame.init() screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT)) pygame.display.set_caption("The RNG") pygame.display.set_icon(load_image('icon.gif')[0]) game = Game(screen) game.main_menu() if __name__ == '__main__': main() ```
[ { "content": "Here is a code snippet:\n```python\n# -*- coding: UTF-8 -*-\nimport os,pickle,glob,time,sys\nfrom tools import *\nfrom entity import *\nfrom Word import Word\nfrom Syllable import Syllable\n\nfrom ipa import ipa\nimport codecs\n\nclass Dictionary:\t# cf Word, in that Text.py will really instantiat...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: UTF-8 -*-\nimport os,pickle,glob,time,sys\nfrom tools import *\nfrom entity import *\nfrom Word import Word\nfrom Syllable import Syllable\n\nfrom ipa import ipa\nimport codecs\n\nclass Dictionary:\t# cf Word, in that Text.py will r...
```python # -*- coding: UTF-8 -*- import os,pickle,glob,time,sys from tools import * from entity import * from Word import Word from Syllable import Syllable from ipa import ipa import codecs class Dictionary: # cf Word, in that Text.py will really instantiate Dictionary_en,Dictionary_fi,usw. classnames=['Phoneme','Onset','Nucleus','Coda','Rime','SyllableBody','Syllable','Word','Phrase'] char2phons=[] for k in ipa.keys(): if len(k)>1: for x in k[1:]: char2phons.append(x) def __init__(self,lang): import prosodic dirself=prosodic.dir_prosodic libfolder=os.path.join(dirself,'lib') dictsfolder=os.path.join(dirself,'dicts') self.config=prosodic.config self.lang = lang self.libfolder = libfolder self.dictsfolder = os.path.join(dictsfolder,self.lang) sys.path.append(self.dictsfolder) self.language="" self.getprep=False self.booted=False for filename in glob.glob(os.path.join(self.dictsfolder, self.lang+'*')): self.language = filename.split(os.path.sep).pop().split(".")[0] break if not self.language: exit('!! language could not be ascertained from files in '+self.dictsfolder+'. 
Please name your .tsv and/or .py dictionary file(s) using a string which begins with the two characters which serve as the name for the dictionary folder (eg, "en")') self.unstressedWords=[] for filename in glob.glob(os.path.join(self.dictsfolder, 'unstressed*')): file=codecs.open(filename,encoding='utf-8') for ln in file: for word in ln.split(): self.unstressedWords.append(word) file.close() break self.maybestressedWords=[] for filename in glob.glob(os.path.join(self.dictsfolder, 'maybestressed*')): file=codecs.open(filename,encoding='utf-8') for ln in file: for word in ln.split(): self.maybestressedWords.append(word) file.close() break pyfile=os.path.join(self.dictsfolder,self.language+'.py') if os.path.exists(pyfile): self.getprep=get_class(self.language+'.get') self.cachefolder=os.path.join(self.dictsfolder,'_cache') self.dictentries=None build=False ## language objects timestart=time.clock() if being.persists: if __name__=='__main__': print "## booting ontology: " + self.language + " ..." 
if not os.path.exists(self.cachefolder):os.mkdir(self.cachefolder) self.storage = FileStorage(self.cachefolder+'ontology.zodb') self.db = DB(self.storage) self.conn = self.db.open() self.dict = self.conn.root() self.t=transaction if not len(self.dict.values()): build=True else: self.dict={} self.refresh() topickle=self.exists_pickle() topickle=False if topickle: self.boot_pickle(topickle) else: build=True if build: self.refresh() self.boot() if __name__=='__main__': print self.stats(prefix="\t").replace("[[time]]",str(round((time.clock() - timestart),2))) def boot(self): ## NEEDS EXTENSION if not self.getprep: bootfile=os.path.join(self.dictsfolder,self.language+'.tsv') if os.path.exists(bootfile): self.boot_general(bootfile) self.booted=True if not self.booted: exit("<error:dictionary> neither a "+self.language+".tsv nor a "+self.language+".py in directory "+self.dictsfolder) def str2unicode(self,string): o=u"" for x in string: try: o+=unicode(x) except UnicodeDecodeError: print "error" o+=unichr(ord(x)) return o def boot_general(self,bootfile): if __name__=='__main__': print "## booting dictionary: " + self.language + " ..." file=codecs.open(bootfile,encoding='utf-8') for ln in file: line=ln.split('\t') line.reverse() token=line.pop().strip() if token.startswith('#'): continue stressedipa=line.pop().strip() if ("." 
in token) and (token.count(".")==stressedipa.count(".")): sylls_text=token.split(".") token=token.replace(".","") else: sylls_text=None #line.reverse() #otherfeats=line if (not token in self.dict['Word']): self.dict['Word'][token]=[] self.dict['Word'][token].append((stressedipa,sylls_text)) def build(self): ## NEEDS EXTENSION pass def refresh(self): if being.persists: self.dict.clear() for k in Dictionary.classnames: self.dict[k]=OOBTree() else: for k in Dictionary.classnames: self.dict[k]={} # boot options def exists_pickle(self,picklefile=False): if not picklefile: picklefile=self.dictsfolder+self.language+'.pickle' if not os.path.exists(picklefile): return False else: return picklefile def boot_pickle(self,picklefile): file=open(picklefile) self.dict=pickle.load(file) file.close() def boot_dict(self,filename): # filename = *.txt or *.pickle print ">> loading Dictionary " + filename + "..." fileobj = open(self.dictsfolder + filename, 'r') if filename[-7:] == ".pickle": return None # the bare-bones text file [language].tsv should not be pickled--wasteful elif filename[-4:] == ".txt": dictionary = {} curLine = fileobj.readline().strip() while(curLine): curLine = fileobj.readline().strip() if(curLine == ""): break if(curLine.startswith("#")): continue tokens = curLine.split() if(len(tokens) < 2): continue curKey = tokens[0].lower() if("(" in curKey): wrd = curKey.split("(")[0].strip() else: wrd = curKey.strip() if(not wrd in dictionary): dictionary[wrd] = [] dictionary[wrd].append(curLine) self.dictentries=dictionary else: self.dictentries={} # boot_dict_specific def boot_dict_specific(self,filename,sep="\t"): newdict={} if (not "/" in filename): filename=self.dictsfolder+filename file=open(filename,'r') for line in file: linedat=line.split(sep) key=linedat[0] val=linedat[1] if key.startswith('#'): continue if (not key in newdict): newdict[key]=val else: if type(newdict[key])==list: newdict[key].append(val) else: newdict[key]=[newdict[key],val] return newdict def 
boot_build(self): self.build(save=False) # lookup options def lookup_db(self,tok): # needs to be rewritten rows=[] for row in self.c.execute('select entry from dict where lower(word)="' + tok.lower() + '"'): for x in row: if (not x in rows): rows.append(x) return rows def lookup_dict(self,tok): if (not tok in self.dict): return {} else: return self.dictentries[tok] def gleanPunc(self,word): return gleanPunc(word) def has(self,word): if not word: return False word=unicode(word) (p0,word,p1)=gleanPunc2(word) word_l = word.lower() ## if not there, but a contractino # if already there, say yes if word_l in self.dict['Word'] and self.dict['Word'][word_l]: return True """ for contr,add_ipa in [("'s","z"), ("'d","d")]: if word_l.endswith(contr): word_l_unc = word_l[:-2] # if the uncontracted in the dictionary if word_l_unc in self.dict['Word'] and self.dict['Word'][word_l_unc]: for obj in self.dict['Word'][word_l_unc]: if type(obj) in [tuple]: ipa,sylls_text=obj else: ipa=obj.ipa sylls_text=obj.sylls_text ipa+=add_ipa #sylls_text[-1]+=contr ## save new word if not word_l in self.dict['Word']: self.dict['Word'][word_l]=[] self.dict['Word'][word_l]+=[(ipa,sylls_text)] """ return (word_l in self.dict['Word'] and self.dict['Word'][word_l]) def use(self,classtype,key): """ HACKED 9/29/16: No longer caching SyllableBodies. Reuse was causing bugs. More thorough solution would be helpful. 
""" if type(key)==type([]): key=tuple(key) if (not key in self.dict[classtype]): if classtype in ['Phoneme','Onset','Nucleus','Coda','Rime','Syllable']: self.dict[classtype][key]=get_class(classtype+'.'+classtype)(key,self.lang) #return get_class(classtype+'.'+classtype)(key,self.lang) elif classtype=="SyllableBody": #self.dict[classtype][key]=self.syllphon2syll(key,self.lang) return self.syllphon2syll(key,self.lang) return self.dict[classtype][key] def haveAlready(self,classtype,key): if type(key)==type([]): key=tuple(key) return (key in self.dict[classtype]) def ipa2phons(self,stressedipa): sylls=[] for syllphon in stressedipa.split("."): syll=[] syllphon.strip() for i in range(len(syllphon)): phon=syllphon[i] if (phon in Dictionary.char2phons): continue if (phon=="`") or (phon=="'"): continue try: phonN=syllphon[i+1] except IndexError: phonN=False if phonN and (phonN in Dictionary.char2phons): phon=phon+phonN phonobj=self.use('Phoneme',phon) syll.append(phonobj) Vwaslast=False k=-1 for phon in syll: k+=1 if phon.isVowel(): if Vwaslast: if self.haveAlready('Phoneme', (Vwaslast.phon,phon.phon)): newphon=self.use('Phoneme',(Vwaslast.phon,phon.phon)) else: newphon=get_class('Phoneme.Phoneme')([self.use('Phoneme',x) for x in [Vwaslast.phon,phon.phon]], self.lang) #self.dict['Phoneme'][(Vwaslast.phon,phon.phon)]=newphon self.dict['Phoneme'][Vwaslast.phon+phon.phon]=newphon syll[k]=newphon syll.remove(Vwaslast) break else: Vwaslast=phon sylls.append(tuple(syll)) return sylls def syllphon2syll(self,syllphon,lang): onset=[] nucleus=[] coda=[] for x in syllphon: if x.isVowel(): nucleus.append(x) else: if not nucleus: onset.append(x) else: coda.append(x) return get_class('SyllableBody.SyllableBody')(self.use('Onset',onset),self.use('Rime', (self.use('Nucleus',nucleus),self.use('Coda',coda))), lang) def stressedipa2stress(self,stressedipa): o="" for x in stressedipa.split("."): if "'" in x: o+="P" elif "`" in x: o+="S" else: o+="U" return o def 
getStrengthStress(self,stress): prom_stress=[] prom_strength=[] for x in stress: if x=='P': prom_stress+=[1.0] elif x=='S': prom_stress+=[0.5] elif x=='U': prom_stress+=[0.0] for i,x in enumerate(prom_stress): prevx=prom_stress[i-1] if i-1>=0 else None nextx=prom_stress[i+1] if i+1<len(prom_stress) else None #print i,prevx,x,nextx if nextx!=None and nextx>x: strength=0.0 elif nextx!=None and nextx<x: strength=1.0 elif prevx!=None and prevx>x: strength=0.0 elif prevx!=None and prevx<x: strength=1.0 else: strength=None #print i,prevx,x,nextx prom_strength+=[strength] return (prom_stress,prom_strength) def getStrengthStress0(self,stress): prom_strength=[] prom_stress=[] for i in range(len(stress)): syll=stress[i] syllP=False syllN=False try: syllP=stress[i-1] except IndexError: pass try: syllN=stress[i+1] except IndexError: pass if syll=="P": prom_stress.append(1.0) if (len(stress)>1): if syllN and (syllN=="P"): prom_strength.append(None) elif syllP and (syllP=="P"): if len(stress)>2: prom_strength.append(1.0) else: prom_strength.append(None) else: prom_strength.append(1.0) elif syll=="S": prom_stress.append(0.5) if (len(stress)>1): if syllP and ((syllP=="P") or (syllP=="S")): prom_strength.append(0.5) elif syllN and (syllN=="P"): prom_strength.append(0.5) else: prom_strength.append(0.5) elif syll=="U": prom_stress.append(0.0) if (len(stress)>1): if syllP and ((syllP=="P") or (syllP=="S")): prom_strength.append(0.0) elif syllN and ((syllN=="P") or (syllN=="S")): prom_strength.append(0.0) else: prom_strength.append(None) if len(stress)==1: prom_strength=[None] return (prom_stress,prom_strength) def reset(self): for classtype in [ct for ct in self.dict if ct!='Word']: self.dict[classtype]={} for word in self.dict['Word']: self.dict['Word'][word]=[((wordobj.ipa,wordobj.sylls_text) if type(wordobj)!=tuple else wordobj) for wordobj in self.dict['Word'][word]] def make(self,stressedipasylls_text,token): stressedipa=stressedipasylls_text[0] 
sylls_text=stressedipasylls_text[1] stress=self.stressedipa2stress(stressedipa) (prom_stress,prom_strength)=self.getStrengthStress(stress) syllphons=self.ipa2phons(stressedipa) sylls=[] for i in range(len(syllphons)): syllbody=self.use('SyllableBody',syllphons[i]) syll=self.use('Syllable',(syllbody,prom_strength[i],prom_stress[i])) #print token,i,syllbody,syll,syllphons,stressedipa,stress,prom_stress,prom_strength sylls.append(syll) word=Word(token,sylls,sylls_text) word.ipa=stressedipa word.stress=stress word.lang=self.lang return word def maybeUnstress(self,words): word=words[0].token.lower() def unstress_word(wordobj): #wordobj.feat('functionword',True) wordobj.feats['functionword']=True wordobj.stress="" for child in wordobj.children: wordobj.stress+="U" child.feats['prom.stress']=0.0 child.feats['prom.kalevala']=None child.children[0].feats['prom.weight']=False if word in self.maybestressedWords: # only for monosyllabs wordobjs=self.dict['Word'][word] stresses = [wordobj.stress for wordobj in wordobjs] if max([len(sx) for sx in stresses])>1: return wordobjs if 'U' in stresses and 'P' in stresses: unstressed_words = [wordobj for wordobj in wordobjs if wordobj.stress=='U'] for wordobj in unstressed_words: unstress_word(wordobj) return wordobjs else: wordobj1=wordobjs[0] ipa=wordobj1.ipa if 'U' in stresses and not 'P' in stresses: newipa="'"+ipa newobjs=[self.make((_ipa,None),word) for _ipa in [ipa,newipa]] #newobjs[0].feat('functionword',True) newobjs[0].feats['functionword']=True elif 'P' in stresses and not 'U' in stresses: newipa=ipa[1:] newobjs=[self.make((_ipa,None),word) for _ipa in [ipa,newipa]] #newobjs[-1].feat('functionword',True) newobjs[-1].feats['functionword']=True else: print "??",word,stresses return newobjs elif word in self.unstressedWords: wordobj=self.dict['Word'][word][0] unstress_word(wordobj) return [wordobj] return words def get(self,word,stress_ambiguity=True): if type(word)==str: word=word.decode('utf-8',errors='ignore') 
(word,punct)=gleanPunc(word) if self.has(word): words=self.dict['Word'][word.lower()] elif self.getprep: words=self.getprep(word,config=self.config) else: return [Word(word,[],None)] if not words: return [Word(word,[],None)] if type(words)==list: if type(words[0])==tuple: # New word needs to be built wordobjs=[] for wordtuple in words: wrd=wordtuple[:2] attrs=wordtuple[2] if len(wordtuple)>2 else {} wordobj=self.make(wrd,word) for _k,_v in attrs.items(): setattr(wordobj,_k,_v) wordobjs+=[wordobj] self.dict['Word'][word.lower()]=wordobjs return self.maybeUnstress(wordobjs) if stress_ambiguity else wordobjs else: wordobjs=words else: wordobjs=[words] return self.maybeUnstress(wordobjs) if stress_ambiguity else wordobjs ## featpaths:experimental def featpath(self): pass # save options def save_tabbed(self): for k,v in self.dict.items(): if k!='word': continue # just the words for now o="token\tstress\tipa\n" for kk,vv in v.items(): if type(vv)!=type([]): vv=[vv] for vvv in vv: if not vvv: continue o+=str(kk)+"\t"+str(vvv.str_ipasyllstress())+"\n" file=open(self.dictsfolder+self.language+'.tsv','w') file.write(o) file.close() def save_pickle(self): file=open(self.dictsfolder+self.language+'.pickle','w') pickle.dump(self.dict,file) file.close() def persist(self): if being.persists: self.t.commit() def save(self): if being.persists: print "saving..." self.t.commit() #transaction.commit() self.save_tabbed() def words(self): words=[] for k,v in self.dict['word'].items(): for vv in v: words.append(vv) return words def close(self): if being.persists: self.conn.close() ## output option def stats(self,prefix="\t"): #self.numents={} o="" for k,v in self.dict.items(): if not len(v): continue if k[-2:]=="us": ks=k[:-2]+"i" else: ks=k+'s' o+=prefix + str(len(v)).replace('0','?') + ' ' + ks + '\n' if o: return "## [[[time]]s] loaded:\n"+o else: return "" return o ```
[ { "content": "Recreate the entire code block with identical formatting:\n```python\n# Copyright (c) 2015, Robert Escriva\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * R...
[ { "content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# Copyright (c) 2015, Robert Escriva\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met...
```python # Copyright (c) 2015, Robert Escriva # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of GitSwitch nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. INFO_REFS = '/info/refs' UPLOAD_PACK = '/git-upload-pack' RECEIVE_PACK = '/git-receive-pack' BLACKLISTED_SUFFIXES = (INFO_REFS, UPLOAD_PACK, RECEIVE_PACK) BLACKLISTED_NAMES = ('new', '.', '..') NO_CACHE_HEADERS = [ ('Expires', 'Fri, 01 Jan 1980 00:00:00 GMT'), ('Pragma', 'no-cache'), ('Cache-Control', 'no-cache, max-age=0, must-revalidate'), ] ```
[ { "content": "```python\nimport numpy as np\n\nfrom fos.actor.primitives import AABBPrimitive\nfrom pyglet.gl import GLfloat\nfrom pyglet.gl import *\n\nclass Actor(object):\n \"\"\" Define a visualization object in Fos \"\"\"\n \n def __init__(self,\n affine = None,\n a...
[ { "content": "<|memory_start|>```python\nimport numpy as np\n\nfrom fos.actor.primitives import AABBPrimitive\nfrom pyglet.gl import GLfloat\nfrom pyglet.gl import *\n\nclass Actor(object):\n \"\"\" Define a visualization object in Fos \"\"\"\n \n def __init__(self,\n affine = None,\n ...
```python import numpy as np from fos.actor.primitives import AABBPrimitive from pyglet.gl import GLfloat from pyglet.gl import * class Actor(object): """ Define a visualization object in Fos """ def __init__(self, affine = None, aabb = None, force_center_data = False, **kwargs): """ Create an actor Parameters ---------- affine : 4x4 array the affine is expected to be normal, i.e. it has only rotational and translational components, but no shears the affine is applied to the input vertices data to position the actor in the world space. If set to none, an affine is generated to positions the actor optimally for the camera view aabb : (corner1, corner2) the axis-aligned bounding box. axis-aligned means aligned with the world coordinate system axes corner1 : 3x1 array bottom-left-front point of the box when look into z direction corner2 : 3x1 array top-right-back point of the box If set to None, an axis aligned boundingbox is computed using the input vertices force_center_data : boolean if set to true, the mean of vertices location is subtracted from all the vertices. this is useful to compute a better bounding box and if the data has no meaningful affine obb : (center, orientation, halfwidths) center : 3x1 array the center point of the aabb orientation : 3x3 array orthogonal unit vectors halfwidths : 3x1 array box halfwidths along each axis """ # data related information self.vertices = None self.living = False self.show_aabb = True # self.connectivity = None # self.field = None # scalar, vector, tensor # self.colormap = None # self.texture = None # movement related information. 
use the # self.velocity = None # self.acceleration = None # event related information # self.event_queue = None # mouse or keyboard events on the actor # self.event_handlers = None # related: menu options for the actor def setup(self): """ Data preparation """ # display lists, vbo # prepare different resolutions pass def update(self, dt): """ Update the actor dt from the global timer """ pass def draw_aabb(self): """ Draw the actor """ if self.show_aabb: glPushMatrix() glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) glLineWidth(1.0) glColor3f(1.0, 1.0, 0.0) glEnableClientState(GL_VERTEX_ARRAY) glVertexPointer(3, GL_FLOAT, 0, self.aabb.vertices_ptr) glDrawElements(self.aabb.mode,self.aabb.indices_nr,self.aabb.type,self.aabb.indices_ptr) glDisableClientState(GL_VERTEX_ARRAY) glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) glPopMatrix() def delete(self): """ Removing the geometry """ pass def info(self): """ Show information about the actor """ # debug mode print "this actor is at ", self print "number of vertices", len(self.vertices) print "is the actor living ?", self.living if not self.aabb is None: print "has boundary box", self.aabb def to_raytracer_file(self): """ Save the geometry to a file readable by a raytracer """ pass def process_pickray(self, near, far): """ Process the pick ray like intersecting with the actor """ pass def process_keys(self,symbol,modifiers): pass def process_mouse_motion(self,x,y,dx,dy): pass def make_aabb(self, aabb = None, margin = 30): """ Make the axis aligned bounding box. Parameters ---------- aabb : 2-tuple of numpy arrays of shape(3,) Defining the box by left-bottom-front and the top-right-back coordinate points. If None, a bounding box based on the vertices is computed. 
margin : float A margin to be added to the computed bounding box """ # if no aabb is given, compute one if aabb == None: # compute an axis aligned bounding box # based on the vertices coord1 = np.array([self.vertices[:,0].min(), self.vertices[:,1].min(), self.vertices[:,2].min()], dtype = np.float32) coord2 = np.array([self.vertices[:,0].max(), self.vertices[:,1].max(), self.vertices[:,2].max()], dtype = np.float32) self.aabb = AABBPrimitive(blf = coord1, trb = coord2, margin = margin) else: assert len(aabb) == 2 # otherwise set to given aabb self.aabb = AABBPrimitive(blf = aabb[0], trb = aabb[1], margin = margin) def get_aabb_coords(self): """ Returns AABB coordinates blf and trb in world space (using the affine) """ ab1 = self.aabb.coord[0] ab2 = self.aabb.coord[1] r1 = np.dot(self.affine, np.array( [ab1[0], ab1[1], ab1[2], 1.0] ) ) r2 = np.dot(self.affine, np.array( [ab2[0], ab2[1], ab2[2], 1.0] ) ) return (r1[:3], r2[:3]) def make_obb(self): pass # just reuse the aabb points # leftbottom, righttop = self.aabb # # center = np.mean( np.vstack( (leftbottom, righttop) ), axis = 0) # halfwidths = (leftbottom - righttop) / 2.0 # # in our default network, this obb is axis-aligned, thus the # # obb is the identity # orientation = np.eye( 3, 3 ) # # self.obb = (center, halfwidths, orientation) def bounding_sphere(self): """ Compute the bounding sphere """ pass # can use PCA? def bouding_ellipsoid(self): """ Compute the bounding elipsoid """ pass # can use PCA? ## affine logic ### def set_affine(self, affine): # update the affine print "update affine", self.affine self.affine = affine self._update_glaffine() def scale(self, scale_factor): """ Scales the actor by scale factor. Multiplies the diagonal of the affine for the first 3 elements """ self.affine[0,0] *= scale_factor self.affine[1,1] *= scale_factor self.affine[2,2] *= scale_factor self._update_glaffine() def translate(self, dx, dy, dz): """ Translate the actor. 
Remember the OpenGL has right-handed coordinate system """ self.affine[0,3] += dx self.affine[1,3] += dy self.affine[2,3] += dz self._update_glaffine() def set_position(self, x, y, z): """ Position the actor. Remember the OpenGL has right-handed coordinate system """ self.affine[0,3] += x self.affine[1,3] += y self.affine[2,3] += z self._update_glaffine() def _update_glaffine(self): self.glaffine = (GLfloat * 16)(*tuple(self.affine.T.ravel())) # life processes ### def start(self, lifespan = 10, tickingtime = 2.0): print "the actor is alive" self.living = True self.internal_timestamp = 0.0 # call self.update every tickingtime def stop(self): print "the actor stops living" self.living = False def cont(self): print "continue to live happily" self.living = True ```
[ { "content": "Write the code verbatim:\n```python\n#\n# Jinja2 (http://jinja.pocoo.org) based template renderer.\n#\n# Author: Satoru SATOH <ssato redhat.com>\n# License: MIT\n#\n# pylint: disable=unused-argument\n\"\"\"Template rendering module for jinja2-based template config files.\n\"\"\"\nfrom __future__ i...
[ { "content": "Write the code verbatim:\n<|memory_start|>```python\n#\n# Jinja2 (http://jinja.pocoo.org) based template renderer.\n#\n# Author: Satoru SATOH <ssato redhat.com>\n# License: MIT\n#\n# pylint: disable=unused-argument\n\"\"\"Template rendering module for jinja2-based template config files.\n\"\"\"\nf...
```python # # Jinja2 (http://jinja.pocoo.org) based template renderer. # # Author: Satoru SATOH <ssato redhat.com> # License: MIT # # pylint: disable=unused-argument """Template rendering module for jinja2-based template config files. """ from __future__ import absolute_import import logging import os import anyconfig.compat LOGGER = logging.getLogger(__name__) SUPPORTED = False try: import jinja2 from jinja2.exceptions import TemplateNotFound SUPPORTED = True def tmpl_env(paths): """ :param paths: A list of template search paths """ return jinja2.Environment(loader=jinja2.FileSystemLoader(paths)) except ImportError: LOGGER.warn("Jinja2 is not available on your system, so " "template support will be disabled.") class TemplateNotFound(RuntimeError): """Dummy exception""" pass def tmpl_env(*args): """Dummy function""" return None def make_template_paths(template_file, paths=None): """ Make up a list of template search paths from given `template_file` (absolute or relative path to the template file) and/or `paths` (a list of template search paths given by user). NOTE: User-given `paths` will take higher priority over a dir of template_file. :param template_file: Absolute or relative path to the template file :param paths: A list of template search paths >>> make_template_paths("/path/to/a/template") ['/path/to/a'] >>> make_template_paths("/path/to/a/template", ["/tmp"]) ['/tmp', '/path/to/a'] >>> os.chdir("/tmp") >>> make_template_paths("./path/to/a/template") ['/tmp/path/to/a'] >>> make_template_paths("./path/to/a/template", ["/tmp"]) ['/tmp', '/tmp/path/to/a'] """ tmpldir = os.path.abspath(os.path.dirname(template_file)) return [tmpldir] if paths is None else paths + [tmpldir] def render_s(tmpl_s, ctx=None, paths=None): """ Compile and render given template string `tmpl_s` with context `context`. 
:param tmpl_s: Template string :param ctx: Context dict needed to instantiate templates :param paths: Template search paths >>> render_s("aaa") == "aaa" True >>> s = render_s('a = {{ a }}, b = "{{ b }}"', {'a': 1, 'b': 'bbb'}) >>> if SUPPORTED: ... assert s == 'a = 1, b = "bbb"' """ if paths is None: paths = [os.curdir] env = tmpl_env(paths) if env is None: return tmpl_s if ctx is None: ctx = {} return tmpl_env(paths).from_string(tmpl_s).render(**ctx) def render_impl(template_file, ctx=None, paths=None): """ :param template_file: Absolute or relative path to the template file :param ctx: Context dict needed to instantiate templates """ env = tmpl_env(make_template_paths(template_file, paths)) if env is None: return anyconfig.compat.copen(template_file).read() if ctx is None: ctx = {} return env.get_template(os.path.basename(template_file)).render(**ctx) def render(filepath, ctx=None, paths=None, ask=False): """ Compile and render template and return the result as a string. :param template_file: Absolute or relative path to the template file :param ctx: Context dict needed to instantiate templates :param paths: Template search paths :param ask: Ask user for missing template location if True """ try: return render_impl(filepath, ctx, paths) except TemplateNotFound as mtmpl: if not ask: raise RuntimeError("Template Not found: " + str(mtmpl)) usr_tmpl = anyconfig.compat.raw_input("\n*** Missing template " "'%s'. Please enter absolute " "or relative path starting from " "'.' to the template file: " % mtmpl) usr_tmpl = os.path.normpath(usr_tmpl.strip()) paths = make_template_paths(usr_tmpl, paths) return render_impl(usr_tmpl, ctx, paths) # vim:sw=4:ts=4:et: ```
[ { "content": "Recreate the original code text:\n```python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport re\nimport sys\nimport theano\nimport theano.tensor as T\n\nfrom nltk import corpus\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.cross_validation import train_test...
[ { "content": "Recreate the original code text:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport re\nimport sys\nimport theano\nimport theano.tensor as T\n\nfrom nltk import corpus\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.cross_validation i...
```python # -*- coding: utf-8 -*- import numpy as np import re import sys import theano import theano.tensor as T from nltk import corpus from sklearn.metrics import accuracy_score, classification_report from sklearn.cross_validation import train_test_split from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction import text np.random.seed(123) # For reproducibility def process_newsgroups_document(document): # To simplify, we ignore everything thas isn't a word document = re.sub(r"[^a-zA-Z]", " ", document) # We only make use of lower case words words = document.lower().split() # We filter out every stopword for the english language stopwords = set(corpus.stopwords.words("english")) document = " ".join([word for word in words if word not in stopwords]) return document print "Fetching and processing 20 Newsgroup" sys.stdout.flush() newsgroups = fetch_20newsgroups(subset='all') vectorizer = text.CountVectorizer(analyzer='word', preprocessor=process_newsgroups_document, max_features=5000) newsgroups_dataset = vectorizer.fit_transform(newsgroups.data).todense().astype(theano.config.floatX) newsgroups_target = newsgroups.target ng_X_train, ng_X_test, ng_y_train, ng_y_test = train_test_split(newsgroups_dataset, newsgroups_target, test_size=0.2) print "Converting train variables to theano" sys.stdout.flush() # Convert the data to theano shared variables ng_X_train = theano.shared(ng_X_train, borrow=True) ng_y_train = theano.shared(ng_y_train, borrow=True) print "Setting up parameters" sys.stdout.flush() N = newsgroups_dataset.shape[0] # Number of examples in the dataset. n_input = newsgroups_dataset.shape[1] # Number of features of the dataset. Input of the Neural Network. n_output = len(newsgroups.target_names) # Number of classes in the dataset. Output of the Neural Network. 
n_h1 = 2500 # Size of the first layer n_h2 = 1000 # Size of the second layer alpha = 0.01 # Learning rate parameter lambda_reg = 0.01 # Lambda value for regularization epochs = 500 # Number of epochs for gradient descent batch_size = 128 # Size of the minibatches to perform sgd train_batches = ng_X_train.get_value().shape[0] / batch_size print "Defining computational graph" sys.stdout.flush() # Stateless variables to handle the input index = T.lscalar('index') # Index of a minibatch X = T.matrix('X') y = T.lvector('y') # First layer weight matrix and bias W1 = theano.shared( value=np.random.uniform( low=-np.sqrt(6. / (n_input + n_h1)), high=np.sqrt(6. / (n_input + n_h1)), size=(n_input, n_h1) ).astype(theano.config.floatX), name='W1', borrow=True ) b1 = theano.shared( value=np.zeros((n_h1,), dtype=theano.config.floatX), name='b1', borrow=True ) # Second layer weight matrix and bias W2 = theano.shared( value=np.random.uniform( low=-np.sqrt(6. / (n_h1 + n_h2)), high=np.sqrt(6. / (n_h1 + n_h2)), size=(n_h1, n_h2) ).astype(theano.config.floatX), name='W2', borrow=True ) b2 = theano.shared( value=np.zeros((n_h2,), dtype=theano.config.floatX), name='b2', borrow=True ) # Output layer weight matrix and bias W3 = theano.shared( value=np.random.uniform( low=-np.sqrt(6. / (n_h2 + n_output)), high=np.sqrt(6. 
/ (n_h2 + n_output)), size=(n_h2, n_output) ).astype(theano.config.floatX), name='W3', borrow=True ) b3 = theano.shared( value=np.zeros((n_output,), dtype=theano.config.floatX), name='b3', borrow=True ) z1 = T.dot(X, W1) + b1 # Size: N x n_h1 a1 = T.tanh(z1) # Size: N x n_h1 z2 = T.dot(a1, W2) + b2 # Size: N x n_h2 a2 = T.tanh(z2) # Size: N x n_h2 z3 = T.dot(a2, W3) + b3 # Size: N x n_output y_out = T.nnet.softmax(z3) # Size: N x n_output y_pred = T.argmax(y_out, axis=1) # Size: N # Regularization term to sum in the loss function loss_reg = 1./N * lambda_reg/2 * (T.sum(T.sqr(W1)) + T.sum(T.sqr(W2)) + T.sum(T.sqr(W3))) # Loss function loss = T.nnet.categorical_crossentropy(y_out, y).mean() + loss_reg print "Compiling theano functions" sys.stdout.flush() # Define the functions forward_propagation = theano.function([X], y_out) loss_calculation = theano.function([X, y], loss) predict = theano.function([X], y_pred) print "Getting gradients" sys.stdout.flush() dJdW1 = T.grad(loss, wrt=W1) dJdb1 = T.grad(loss, wrt=b1) dJdW2 = T.grad(loss, wrt=W2) dJdb2 = T.grad(loss, wrt=b2) dJdW3 = T.grad(loss, wrt=W3) dJdb3 = T.grad(loss, wrt=b3) print "Setting updates" sys.stdout.flush() updates = [ (W1, W1 - alpha * dJdW1), # Update step. W1 = W1 - alpha * dJdW1 (b1, b1 - alpha * dJdb1), # Update step. b1 = b1 - alpha * dJdb1 (W2, W2 - alpha * dJdW2), # Update step. W2 = W2 - alpha * dJdW2 (b2, b2 - alpha * dJdb2), # Update step. b2 = b2 - alpha * dJdb2 (W3, W3 - alpha * dJdW3), # Update step. W3 = W3 - alpha * dJdW3 (b3, b3 - alpha * dJdb3), # Update step. 
b3 = b3 - alpha * dJdb3 ] print "Compiling gradient step" sys.stdout.flush() gradient_step = theano.function( inputs=[index], outputs=loss, updates=updates, givens={ X: ng_X_train[index * batch_size: (index + 1) * batch_size], y: ng_y_train[index * batch_size: (index + 1) * batch_size] } ) print "Starting training" sys.stdout.flush() for i in xrange(epochs): # We train for epochs times for mini_batch in xrange(train_batches): gradient_step(mini_batch) if i % 50 == 0: print "Loss for iteration {}: {}".format( i, loss_calculation(ng_X_train.get_value(), ng_y_train.get_value()) ) sys.stdout.flush() print "Training finished. Getting some results." sys.stdout.flush() predictions = predict(ng_X_test) print "Accuracy: {:.3f}".format(accuracy_score(ng_y_test, predictions)) sys.stdout.flush() print "Classification report" print classification_report(ng_y_test, predictions, target_names=newsgroups.target_names) sys.stdout.flush() ```
[ { "content": "Here is a code snippet:\n```python\n# Copyright (c) 2008,2015,2018 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Collection of generally useful utility code from the cookbook.\"\"\"\n\nimport os\n\nimport numpy as np\n...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\n# Copyright (c) 2008,2015,2018 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Collection of generally useful utility code from the cookbook.\"\"\"\n\nimport os\n\nimpo...
```python # Copyright (c) 2008,2015,2018 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """Collection of generally useful utility code from the cookbook.""" import os import numpy as np import pooch from . import __version__ try: string_type = basestring except NameError: string_type = str # TODO: This can go away when we remove Python 2 def is_string_like(s): """Check if an object is a string.""" return isinstance(s, string_type) POOCH = pooch.create( path=pooch.os_cache('metpy'), base_url='https://github.com/Unidata/MetPy/raw/{version}/staticdata/', version='v' + __version__, version_dev='master', env='TEST_DATA_DIR') # Check if we're running from a git clone and if so, bash the path attribute with the path # to git's local data store (un-versioned) # Look for the staticdata directory (i.e. this is a git checkout) if os.path.exists(os.path.join(os.path.dirname(__file__), '..', 'staticdata')): POOCH.path = os.path.join(os.path.dirname(__file__), '..', 'staticdata') POOCH.load_registry(os.path.join(os.path.dirname(__file__), 'static-data-manifest.txt')) def get_test_data(fname, as_file_obj=True): """Access a file from MetPy's collection of test data.""" path = POOCH.fetch(fname) # If we want a file object, open it, trying to guess whether this should be binary mode # or not if as_file_obj: return open(path, 'rb') return path class Registry(object): """Provide a generic function registry. This provides a class to instantiate, which then has a `register` method that can be used as a decorator on functions to register them under a particular name. """ def __init__(self): """Initialize an empty registry.""" self._registry = {} def register(self, name): """Register a callable with the registry under a particular name. Parameters ---------- name : str The name under which to register a function Returns ------- dec : callable A decorator that takes a function and will register it under the name. 
""" def dec(func): self._registry[name] = func return func return dec def __getitem__(self, name): """Return any callable registered under name.""" return self._registry[name] def broadcast_indices(x, minv, ndim, axis): """Calculate index values to properly broadcast index array within data array. See usage in interp. """ ret = [] for dim in range(ndim): if dim == axis: ret.append(minv) else: broadcast_slice = [np.newaxis] * ndim broadcast_slice[dim] = slice(None) dim_inds = np.arange(x.shape[dim]) ret.append(dim_inds[tuple(broadcast_slice)]) return tuple(ret) def iterable(value): """Determine if value can be iterated over.""" # Special case for pint Quantities if hasattr(value, 'magnitude'): value = value.magnitude return np.iterable(value) __all__ = ('Registry', 'broadcast_indices', 'get_test_data', 'is_string_like', 'iterable') ```
[ { "content": "```python\n__author__ = 'tjhunter'\n\nimport build\nimport json\nimport pylab as pl\nfrom matplotlib.collections import LineCollection\n# Draws the network as a pdf and SVG file.\n\ndef draw_network(ax, fd, link_style):\n def decode_line(l):\n #print l\n dct = json.loads(l)\n lats = dct[...
[ { "content": "<|memory_start|>```python\n__author__ = 'tjhunter'\n\nimport build\nimport json\nimport pylab as pl\nfrom matplotlib.collections import LineCollection\n# Draws the network as a pdf and SVG file.\n\ndef draw_network(ax, fd, link_style):\n def decode_line(l):\n #print l\n dct = json.loads(l)\...
```python __author__ = 'tjhunter' import build import json import pylab as pl from matplotlib.collections import LineCollection # Draws the network as a pdf and SVG file. def draw_network(ax, fd, link_style): def decode_line(l): #print l dct = json.loads(l) lats = dct['lats'] lons = dct['lons'] return zip(lons, lats) lines = [decode_line(l) for l in fd] #print lines xmin = min([x for l in lines for x,y in l]) xmax = max([x for l in lines for x,y in l]) ymin = min([y for l in lines for x,y in l]) ymax = max([y for l in lines for x,y in l]) lc = LineCollection(lines, **link_style) ax.add_collection(lc, autolim=True) return ((xmin,xmax),(ymin,ymax)) fname = build.data_name('kdd/net_export_6.json') fig = pl.figure("fig1",figsize=(10,10)) ax = fig.gca() ax.set_axis_off() style = {'colors':'k','linewidths':0.5} with open(fname) as f: (xlims, ylims) = draw_network(ax, f, style) ax.set_xlim(*xlims) ax.set_ylim(*ylims) # Saving in pdf is a bit slow build.save_figure(fig, 'figures-kdd/network_export_6',save_svg=True) ```
[ { "content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\n\n########################## Copyrights and license ############################\n# #\n# Copyright 2011-2015 Christian Lupien <christian.lupien@usherbrooke.ca> ...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n########################## Copyrights and license ############################\n# #\n# Copyright 2011-2015 Christian Lupien <christian.lupien@u...
```python # -*- coding: utf-8 -*- ########################## Copyrights and license ############################ # # # Copyright 2011-2015 Christian Lupien <christian.lupien@usherbrooke.ca> # # # # This file is part of pyHegel. http://github.com/lupien/pyHegel # # # # pyHegel is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the # # Free Software Foundation, either version 3 of the License, or (at your # # option) any later version. # # # # pyHegel is distributed in the hope that it will be useful, but WITHOUT # # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # # License for more details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with pyHegel. If not, see <http://www.gnu.org/licenses/>. # # # ############################################################################## from __future__ import absolute_import # Use version according to option 5 in https://packaging.python.org/en/latest/single_source_version.html # make sure that new versions numbers compare properly when using # pkg_resources.parse_version __version__ = '1.0.0rc2' __copyright__ = '2011-2015 Christian Lupien' def start_pyHegel(): """ This is the recommanded way to start pyHegel. It starts ipython in a standard way (pylab, autocall enabled,...) and then loads and initializes the pyHegel commands. If the python session was started with command line arguments --console, it will try to start pyHegel in the Console program that comes with pythonxy. This is windows only. If you later need access to the commands in a module: import pyHegel.commands as cmds cmds.get(somedevice) or from pyHegel.commands import * get(somedevice) or any other variants you want. 
""" import sys import os if os.name == 'nt' and len(sys.argv) == 2 and sys.argv[1] == '--console': start_console() else: from . import main main.main_start() def start_console(): from . import win_console_helper win_console_helper.start_console() ```
[ { "content": "```python\nfrom ..with_request import WithRequest\n\n\nclass Account(WithRequest):\n\n def __init__(self, url, session_id):\n self.base_url = url\n self.session_id=session_id\n\n def account_get_by_id(self, i_account):\n \"\"\"Get account by id\"\"\"\n endpoint = ...
[ { "content": "<|memory_start|>```python\nfrom ..with_request import WithRequest\n\n\nclass Account(WithRequest):\n\n def __init__(self, url, session_id):\n self.base_url = url\n self.session_id=session_id\n\n def account_get_by_id(self, i_account):\n \"\"\"Get account by id\"\"\"\n ...
```python from ..with_request import WithRequest class Account(WithRequest): def __init__(self, url, session_id): self.base_url = url self.session_id=session_id def account_get_by_id(self, i_account): """Get account by id""" endpoint = "{0}".format('/rest/Account/get_account_info') auth = '{{ "session_id": "{0}" }}'.format(self.session_id) params = '{{ "i_account": {0} }}'.format(i_account) payload = { u'auth_info': auth, u'params': params } return self.post_it(endpoint, payload, {}) def account_get_by_pin(self, pin_number): """Get account by pin number""" endpoint = "{0}".format('/rest/Account/get_account_info') auth = '{{ "session_id": "{0}" }}'.format(self.session_id) params = '{{ "id": "{0}" }}'.format(pin_number) payload = { u'auth_info': auth, u'params': params } return self.post_it(endpoint, payload, {}) def account_terminate_by_id(self, i_account): """Terminate account by id""" endpoint = "{0}".format('/rest/Account/terminate_account') auth = '{{ "session_id": "{0}" }}'.format(self.session_id) params = '{{ "i_account": {0} }}'.format(i_account) payload = { u'auth_info': auth, u'params': params } return self.post_it(endpoint, payload, {}) ```
[ { "content": "Here is a code file:\n```python\nfrom sfsimodels import models\nimport sfsimodels as sm\nimport json\nimport numpy as np\n\n\ndef test_link_building_and_soil():\n number_of_storeys = 6\n interstorey_height = 3.4 # m\n n_bays = 3\n\n fb = models.FrameBuilding(number_of_storeys, n_bays)...
[ { "content": "Here is a code file:\n<|memory_start|>```python\nfrom sfsimodels import models\nimport sfsimodels as sm\nimport json\nimport numpy as np\n\n\ndef test_link_building_and_soil():\n number_of_storeys = 6\n interstorey_height = 3.4 # m\n n_bays = 3\n\n fb = models.FrameBuilding(number_of_...
```python from sfsimodels import models import sfsimodels as sm import json import numpy as np def test_link_building_and_soil(): number_of_storeys = 6 interstorey_height = 3.4 # m n_bays = 3 fb = models.FrameBuilding(number_of_storeys, n_bays) fb.id = 1 fb.interstorey_heights = interstorey_height * np.ones(number_of_storeys) fb.floor_length = 18.0 # m fb.floor_width = 16.0 # m fd = models.RaftFoundation() fd.length = 4 fd.width = 6 fd.height = 0.0 fd.density = 3 fd2 = models.RaftFoundation() fd2.length = 14 fd2.width = 16 fd2.height = 10.0 fd2.density = 13 # link building to foundation fd.set_building(fb, two_way=False) assert fd.building.n_bays == 3 assert fb.foundation is None fd.set_building(fb, two_way=True) assert fb.foundation.length == 4 # one way link fb.set_foundation(fd2, two_way=False) assert fb.foundation.length == 14 assert fd2.building is None fb.set_foundation(fd2, two_way=True) assert fb.foundation.length == 14 assert np.isclose(fd2.building.floor_width, 16.0) structure = models.SDOFBuilding() structure.set_foundation(fd, two_way=True) assert structure.foundation.width == 6 assert isinstance(fd.building, models.SDOFBuilding) def test_save_and_load_w_linked_building_and_soil(): number_of_storeys = 6 interstorey_height = 3.4 # m wb = models.WallBuilding(number_of_storeys) wb.id = 1 wb.interstorey_heights = interstorey_height * np.ones(number_of_storeys) wb.floor_length = 18.0 # m wb.floor_width = 16.0 # m fd = models.RaftFoundation() fd.length = 4 fd.width = 6 fd.height = 0.0 fd.density = 3 fd.id = 1 # link building to foundation fd.set_building(wb, two_way=False) assert fd.building.n_storeys == number_of_storeys assert wb.foundation is None fd.set_building(wb, two_way=True) assert wb.foundation.length == 4 ecp_output = sm.Output() ecp_output.add_to_dict(wb) ecp_output.add_to_dict(fd) ecp_output.name = "a single wall building" ecp_output.units = "N, kg, m, s" ecp_output.comments = "" p_str = json.dumps(ecp_output.to_dict(), skipkeys=["__repr__"], 
indent=4) objs = sm.loads_json(p_str) building = objs["building"][1] foundation = objs["foundation"][1] assert foundation.width == 6 assert building.foundation.width == 6, building.fd assert np.isclose(building.floor_length, 18.0) if __name__ == '__main__': test_save_and_load_w_linked_building_and_soil() ```
[ { "content": "Here is the code block:\n```python\n#!/usr/bin/python\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache...
[ { "content": "Here is the code block:\n<|memory_start|>```python\n#!/usr/bin/python\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# h...
```python #!/usr/bin/python # # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Compute an expanded vocabulary of embeddings using a word2vec model. This script loads the word embeddings from a trained skip thought model and from a trained word2vec model (typically with a larger vocabulary). It trains a linear regression model without regularization to learn a linear mapping from the word2vec embedding space to the skip thought embedding space. The model is then applied to all words in the word2vec vocabulary, yielding "skip thought embeddings" for the union of the two vocabularies. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os.path import numpy as np import sklearn.linear_model import tensorflow as tf def load_skip_thought_embeddings(checkpoint_path, vocab_size, embedding_dim): """Loads the embedding matrix from a skip thought model checkpoint. Args: checkpoint_path: Model checkpoint file or directory containing a checkpoint file. vocab_size: Number of words in the vocabulary. embedding_dim: Word embedding dimension. Returns: word_embedding: A numpy array of shape [vocab_size, embedding_dim]. Raises: ValueError: If no checkpoint file matches checkpoint_path. 
""" if tf.gfile.IsDirectory(checkpoint_path): checkpoint_file = tf.train.latest_checkpoint(checkpoint_path) if not checkpoint_file: raise ValueError("No checkpoint file found in %s" % checkpoint_path) else: checkpoint_file = checkpoint_path g = tf.Graph() with g.as_default(): word_embedding_tensor = tf.get_variable( name="word_embedding", shape=[vocab_size, embedding_dim]) saver = tf.train.Saver() with tf.Session(graph=g) as sess: tf.logging.info("Loading skip thought embedding matrix from %s", checkpoint_file) saver.restore(sess, checkpoint_file) word_embedding = sess.run(word_embedding_tensor) tf.logging.info("Loaded skip thought embedding matrix of shape %s", word_embedding.shape) return word_embedding def expand_vocabulary(skip_thought_emb, skip_thought_vocab, word2vec_emb, word2vec_vocab): """Runs vocabulary expansion on a skip thought model using a word2vec model. This function trains a linear regression model without regularization to learn a linear mapping from the word2vec embedding space to the skip thought embedding space. The model is then applied to all words in the word2vec vocabulary, yielding "skip thought embeddings" for the union of the two vocabularies. Args: skip_thought_emb: A numpy array of shape [skip_thought_vocab_size, skip_thought_embedding_dim]. skip_thought_vocab: A dictionary of word to id. word2vec_emb: A numpy array of shape [word2vec_vocab_size, word2vec_embedding_dim]. word2vec_vocab: A dictionary of word to id. Returns: combined_emb: A dictionary mapping words to embedding vectors. """ # Find words shared between the two vocabularies. tf.logging.info("Finding shared words") shared_words = [w for w in word2vec_vocab if w in skip_thought_vocab] # Select embedding vectors for shared words. 
tf.logging.info("Selecting embeddings for %d shared words", len(shared_words)) shared_st_emb = skip_thought_emb[ [skip_thought_vocab[w] for w in shared_words]] shared_w2v_emb = word2vec_emb[[word2vec_vocab[w] for w in shared_words]] # Train a linear regression model on the shared embedding vectors. tf.logging.info("Training linear regression model") model = sklearn.linear_model.LinearRegression() model.fit(shared_w2v_emb, shared_st_emb) # Create the expanded vocabulary. tf.logging.info("Creating embeddings for expanded vocabuary") combined_emb = collections.OrderedDict() for w in word2vec_vocab: # Ignore words with underscores (spaces). if "_" not in w: w_emb = model.predict(word2vec_emb[word2vec_vocab[w]].reshape(1, -1)) combined_emb[w] = w_emb.reshape(-1) for w in skip_thought_vocab: combined_emb[w] = skip_thought_emb[skip_thought_vocab[w]] tf.logging.info("Created expanded vocabulary of %d words", len(combined_emb)) return combined_emb def save_embedding_map(embeddings, output_dir): """Saves a word embedding map. Args: embeddings: A dictionary mapping words to embedding vectors. output_dir: Directory in which to save the dictionary and embedding matrix. """ dictionary = embeddings.keys() embeddings = np.array(embeddings.values()) # Write the dictionary. dictionary_file = os.path.join(output_dir, "dictionary.txt") with tf.gfile.GFile(dictionary_file, "w") as f: f.write("\n".join(dictionary)) tf.logging.info("Wrote dictionary file to %s", dictionary_file) # Write the embeddings. embeddings_file = os.path.join(output_dir, "embeddings.npy") np.save(embeddings_file, embeddings) tf.logging.info("Wrote embeddings file to %s", embeddings_file) def load_vocabulary(filename): """Loads a vocabulary file. Args: filename: Path to text file containing newline separated words. Returns: reverse_vocab: A list mapping word id to word. vocab: A dictionary mapping word to word id. 
""" tf.logging.info("Reading vocabulary from %s", filename) with tf.gfile.GFile(filename, mode="r") as f: lines = list(f.readlines()) reverse_vocab = [line.decode("utf-8").strip() for line in lines] tf.logging.info("Read vocabulary of size %d", len(reverse_vocab)) vocab = collections.OrderedDict([(w, i) for i, w in enumerate(reverse_vocab)]) return reverse_vocab, vocab def load_embedding_matrix(filename): """Loads an embedding matrix. Args: filename: Path to serialized numpy ndarray of shape [num_words, embedding_dim]. Returns: A numpy ndarray of shape [num_words, embedding_dim]. """ tf.logging.info("Loading embedding matrix from %s", filename) with open(filename, "r") as f: # Note: tf.gfile.GFile doesn't work here because np.load() expects to be # able to call f.seek() with 3 arguments. embedding_matrix = np.load(f) tf.logging.info("Loaded embedding matrix of shape %s", embedding_matrix.shape) return embedding_matrix def create_embedding_map(reverse_vocab, embedding_matrix): """Returns a dictionary mapping word to word embedding.""" return collections.OrderedDict(zip(reverse_vocab, embedding_matrix)) if __name__ == "__main__": tf.app.run() ```
[ { "content": "Return the code exactly, with no changes:\n```python\n#!/usr/bin/python\n\nimport sqlite3\n\nclass Database:\n def __init__(self):\n self.connect()\n\n def connect(self):\n self.conn = sqlite3.connect('lifebot.db')\n\n def get_cursor(self):\n # A bit weird for now but...
[ { "content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n#!/usr/bin/python\n\nimport sqlite3\n\nclass Database:\n def __init__(self):\n self.connect()\n\n def connect(self):\n self.conn = sqlite3.connect('lifebot.db')\n\n def get_cursor(self):\n # A bit w...
```python #!/usr/bin/python import sqlite3 class Database: def __init__(self): self.connect() def connect(self): self.conn = sqlite3.connect('lifebot.db') def get_cursor(self): # A bit weird for now but trying to figure out SQLite try: return self.conn.cursor() except Exception, e: self.connect() return self.conn.cursor() def create_table(self, query): c = self.get_cursor() c.execute(query) self.conn.commit() self.conn.close() def get(self, query, args = None): if args is None: args = tuple() c = self.get_cursor() c.execute(query, args) return c.fetchone() def get_all(self, query, args = None): if args is None: args = tuple() c = self.get_cursor() c.execute(query, args) return c.fetchall() def insert(self, query, args = None): if args is None: args = tuple() c = self.get_cursor() c.execute(query, args) self.conn.commit() return c.lastrowid ```
[ { "content": "Provide a verbatim copy of the code:\n```python\nfrom citrination_client.models.design.constraints.base import BaseConstraint\nfrom citrination_client.base.errors import CitrinationClientError\n\nclass ElementalInclusionConstraint(BaseConstraint):\n \"\"\"\n Constraint class which allows the...
[ { "content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nfrom citrination_client.models.design.constraints.base import BaseConstraint\nfrom citrination_client.base.errors import CitrinationClientError\n\nclass ElementalInclusionConstraint(BaseConstraint):\n \"\"\"\n Constraint class ...
```python from citrination_client.models.design.constraints.base import BaseConstraint from citrination_client.base.errors import CitrinationClientError class ElementalInclusionConstraint(BaseConstraint): """ Constraint class which allows the assertion that a set of elements is included in the candidate compositions """ def __init__(self, name, elements, logic): """ Constructor. :param name: The name of the column in the data view to which this constraint should be applied :type name: str :param elements: An array of element abbreviations as strings, e.g. ["Mg", "C"] :type elements: list of str :param logic: The logic to apply to the constraint; either "must", "should", or "exclude" :type logic: str """ bad_logic_msg = "ElementalInclusionConstraint must be initialized with the logic parameter equal to \"must\", \"should\", or \"exclude\"" if logic not in ["must", "should", "exclude"]: raise CitrinationClientError(bad_logic_msg) self._name = name self._type = "elementalInclusionConstraint" self._elements = elements self._logic = logic def options(self): return { "elements": self._elements, "logic": self._logic } ```
[ { "content": "Write the code verbatim:\n```python\nfrom datetime import date\n\nfrom cornflake.exceptions import ValidationError\nimport pytest\n\nfrom radar.api.serializers.salt_wasting import SaltWastingClinicalFeaturesSerializer\nfrom radar.models.patient_demographics import PatientDemographics\nfrom radar.m...
[ { "content": "Write the code verbatim:\n<|memory_start|>```python\nfrom datetime import date\n\nfrom cornflake.exceptions import ValidationError\nimport pytest\n\nfrom radar.api.serializers.salt_wasting import SaltWastingClinicalFeaturesSerializer\nfrom radar.models.patient_demographics import PatientDemographi...
```python from datetime import date from cornflake.exceptions import ValidationError import pytest from radar.api.serializers.salt_wasting import SaltWastingClinicalFeaturesSerializer from radar.models.patient_demographics import PatientDemographics from radar.models.patients import Patient from radar.models.users import User @pytest.fixture def patient(): patient = Patient() patient_demographics = PatientDemographics() patient_demographics.date_of_birth = date(2000, 1, 1) patient.patient_demographics.append(patient_demographics) return patient @pytest.fixture def clinical_features(patient): return { 'patient': patient, 'normal_pregnancy': False, 'abnormal_pregnancy_text': 'Foo', 'neurological_problems': True, 'seizures': True, 'abnormal_gait': True, 'deafness': True, 'other_neurological_problem': True, 'other_neurological_problem_text': 'Bar', 'joint_problems': True, 'joint_problems_age': 21, 'x_ray_abnormalities': True, 'chondrocalcinosis': True, 'other_x_ray_abnormality': True, 'other_x_ray_abnormality_text': 'Baz' } def test_valid(clinical_features): obj = valid(clinical_features) assert obj.normal_pregnancy is False assert obj.abnormal_pregnancy_text == 'Foo' assert obj.neurological_problems is True assert obj.seizures is True assert obj.abnormal_gait is True assert obj.deafness is True assert obj.other_neurological_problem is True assert obj.other_neurological_problem_text == 'Bar' assert obj.joint_problems is True assert obj.joint_problems_age == 21 assert obj.x_ray_abnormalities is True assert obj.chondrocalcinosis is True assert obj.other_x_ray_abnormality is True assert obj.other_x_ray_abnormality_text == 'Baz' def test_normal_pregnancy_true(clinical_features): clinical_features['normal_pregnancy'] = True obj = valid(clinical_features) assert obj.abnormal_pregnancy_text is None def test_normal_pregnancy_true_none(clinical_features): clinical_features['normal_pregnancy'] = None valid(clinical_features) def 
test_normal_pregnancy_true_text_none(clinical_features): clinical_features['normal_pregnancy'] = True clinical_features['abnormal_pregnancy_text'] = None obj = valid(clinical_features) assert obj.abnormal_pregnancy_text is None def test_normal_pregnancy_true_text_blank(clinical_features): clinical_features['normal_pregnancy'] = True clinical_features['abnormal_pregnancy_text'] = '' obj = valid(clinical_features) assert obj.abnormal_pregnancy_text is None def test_normal_pregnancy_false_text_none(clinical_features): clinical_features['abnormal_pregnancy_text'] = None invalid(clinical_features) def test_normal_pregnancy_false_text_blank(clinical_features): clinical_features['abnormal_pregnancy_text'] = '' invalid(clinical_features) def test_neurological_problems_false(clinical_features): obj = valid(clinical_features) obj.seizures = None obj.abnormal_gait = None obj.deafness = None obj.other_neurological_problem = None obj.other_neurological_problem_text = None def test_neurological_problems_none(clinical_features): clinical_features['neurological_problems'] = None valid(clinical_features) def test_neurological_problems_true_seizures_none(clinical_features): clinical_features['seizures'] = None invalid(clinical_features) def test_neurological_problems_false_seizures_none(clinical_features): clinical_features['neurological_problems'] = False clinical_features['seizures'] = None valid(clinical_features) def test_neurological_problems_true_abnormal_gait_none(clinical_features): clinical_features['abnormal_gait'] = None invalid(clinical_features) def test_neurological_problems_false_abnormal_gait_none(clinical_features): clinical_features['neurological_problems'] = False clinical_features['abnormal_gait'] = None valid(clinical_features) def test_neurological_problems_true_deafness_none(clinical_features): clinical_features['deafness'] = None invalid(clinical_features) def test_neurological_problems_false_deafness_none(clinical_features): 
clinical_features['neurological_problems'] = False clinical_features['deafness'] = None valid(clinical_features) def test_neurological_problems_true_other_neurological_problem_none(clinical_features): clinical_features['other_neurological_problem'] = None invalid(clinical_features) def test_other_neurological_problem_false_text_none(clinical_features): clinical_features['other_neurological_problem'] = False clinical_features['other_neurological_problem_text'] = None valid(clinical_features) def test_other_neurological_problem_true_text_blank(clinical_features): clinical_features['other_neurological_problem_text'] = '' invalid(clinical_features) def test_other_neurological_problem_true_text_none(clinical_features): clinical_features['other_neurological_problem_text'] = None invalid(clinical_features) def test_joint_problems_false(clinical_features): clinical_features['joint_problems'] = False obj = valid(clinical_features) assert obj.joint_problems_age is None assert obj.x_ray_abnormalities is None assert obj.chondrocalcinosis is None assert obj.other_x_ray_abnormality is None assert obj.other_x_ray_abnormality_text is None def test_joint_problems_none(clinical_features): clinical_features['neurological_problems'] = None valid(clinical_features) def test_joint_problems_true_joint_problems_age_none(clinical_features): clinical_features['joint_problems_age'] = None invalid(clinical_features) def test_joint_problems_false_joint_problems_age_none(clinical_features): clinical_features['joint_problems'] = False clinical_features['joint_problems_age'] = None valid(clinical_features) def test_joint_problems_true_joint_problems_age_too_young(clinical_features): clinical_features['joint_problems_age'] = -1 invalid(clinical_features) def test_joint_problems_true_joint_problems_age_too_old(clinical_features): clinical_features['x_ray_abnormalities'] = 121 invalid(clinical_features) def test_joint_problems_true_x_ray_abnormalities_none(clinical_features): 
clinical_features['x_ray_abnormalities'] = None invalid(clinical_features) def test_joint_problems_false_x_ray_abnormalities_none(clinical_features): clinical_features['joint_problems'] = False clinical_features['x_ray_abnormalities'] = None valid(clinical_features) def test_joint_problems_true_chondrocalcinosis_none(clinical_features): clinical_features['chondrocalcinosis'] = None invalid(clinical_features) def test_joint_problems_false_chondrocalcinosis_none(clinical_features): clinical_features['joint_problems'] = False clinical_features['chondrocalcinosis'] = None valid(clinical_features) def test_joint_problems_true_other_x_ray_abnormality_none(clinical_features): clinical_features['other_x_ray_abnormality'] = None invalid(clinical_features) def test_joint_problems_false_other_x_ray_abnormality_none(clinical_features): clinical_features['joint_problems'] = False clinical_features['other_x_ray_abnormality'] = None valid(clinical_features) def invalid(data): with pytest.raises(ValidationError) as e: valid(data) return e def valid(data): serializer = SaltWastingClinicalFeaturesSerializer(data=data, context={'user': User(is_admin=True)}) serializer.is_valid(raise_exception=True) return serializer.save() ```
[ { "content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\n# This file is part of Shoop.\n#\n# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.\n#\n# This source code is licensed under the AGPLv3 license found in the\n# LICENSE file in the root directory of this source tree.\nimport os\n...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# This file is part of Shoop.\n#\n# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.\n#\n# This source code is licensed under the AGPLv3 license found in the\n# LICENSE file in the root directory of this source tr...
```python # -*- coding: utf-8 -*- # This file is part of Shoop. # # Copyright (c) 2012-2015, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. import os from django.core.exceptions import ImproperlyConfigured from shoop.utils.setup import Setup from . import base_settings def configure(setup): base_settings.configure(setup) local_settings_file = os.getenv('LOCAL_SETTINGS_FILE') # Backward compatibility: Find from current directory, if # LOCAL_SETTINGS_FILE environment variables is unset if local_settings_file is None: cand = os.path.join(os.path.dirname(__file__), 'local_settings.py') if os.path.exists(cand): local_settings_file = cand # Load local settings from file if local_settings_file: local_settings_ns = { '__file__': local_settings_file, } with open(local_settings_file, 'rb') as fp: compiled = compile(fp.read(), local_settings_file, 'exec') exec(compiled, local_settings_ns) if 'configure' not in local_settings_ns: raise ImproperlyConfigured('No configure in local_settings') local_configure = local_settings_ns['configure'] local_configure(setup) return setup globals().update(Setup.configure(configure)) ```
[ { "content": "Here is some code:\n```python\n'''function [out] = imrotateFill(inp, angle)\n% function [out] = imrotateFill(inp)\n% Rotates an 2D image couterclockwise by angle in degrees\n% Output image has the same dimension as input.\n% Undefined regions are filled in by repeating the original image\n% Note:...
[ { "content": "Here is some code:\n<|memory_start|>```python\n'''function [out] = imrotateFill(inp, angle)\n% function [out] = imrotateFill(inp)\n% Rotates an 2D image couterclockwise by angle in degrees\n% Output image has the same dimension as input.\n% Undefined regions are filled in by repeating the origina...
```python '''function [out] = imrotateFill(inp, angle) % function [out] = imrotateFill(inp) % Rotates an 2D image couterclockwise by angle in degrees % Output image has the same dimension as input. % Undefined regions are filled in by repeating the original image % Note: input images must be square % % Copyright (c) UWM, Peter Schwander Mar. 20, 2014 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% version = 'imrotateFill, V0.9'; Ported to python. Hstau Liao Oct. 2016 ''' import numpy as np import logging,sys import math from scipy.ndimage.interpolation import rotate import matplotlib.pyplot as plt def op(input, angle, visual=False): nPix = input.shape[0] inpRep = np.tile(input, (3, 3)) outRep = rotate(inpRep, angle, reshape=False) out = outRep[nPix:2 * nPix, nPix:2 * nPix] if visual: plt.subplot(2, 2, 1) plt.imshow(input,cmap = plt.get_cmap('gray')) plt.title('Input') plt.subplot(2, 2, 2) plt.imshow(out, cmap=plt.get_cmap('gray')) plt.title('Output') plt.subplot(2, 2, 3) plt.imshow(inpRep, cmap=plt.get_cmap('gray')) plt.title('Input 3x3') plt.subplot(2, 2, 4) plt.imshow(outRep, cmap=plt.get_cmap('gray')) plt.title('Output 3x3') plt.show() return out if __name__ == '__main__': # tested using a 6x6 image img = np.loadtxt(sys.argv[1]) ang = float(sys.argv[2]) # in degrees visual = bool(sys.argv[3]) result = op(img,ang,visual) ```
[ { "content": "Here is a code file:\n```python\n\"\"\"Settings that need to be set in order to run the tests.\"\"\"\nimport os\n\n\nDEBUG = True\nSITE_ID = 1\n\nAPP_ROOT = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..'))\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backe...
[ { "content": "Here is a code file:\n<|memory_start|>```python\n\"\"\"Settings that need to be set in order to run the tests.\"\"\"\nimport os\n\n\nDEBUG = True\nSITE_ID = 1\n\nAPP_ROOT = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..'))\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': ...
```python """Settings that need to be set in order to run the tests.""" import os DEBUG = True SITE_ID = 1 APP_ROOT = os.path.abspath( os.path.join(os.path.dirname(__file__), '..')) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', } } ROOT_URLCONF = 'active_users.tests.urls' STATIC_URL = '/static/' STATIC_ROOT = os.path.join(APP_ROOT, '../app_static') MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media') STATICFILES_DIRS = ( os.path.join(APP_ROOT, 'static'), ) TEMPLATES = [{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'DIRS': [os.path.join(APP_ROOT, 'tests/test_app/templates')], 'OPTIONS': { 'context_processors': ( 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.request', ) } }] EXTERNAL_APPS = [ 'django.contrib.admin', 'django.contrib.admindocs', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.staticfiles', 'django.contrib.sitemaps', 'django.contrib.sites', ] INTERNAL_APPS = [ 'active_users', 'active_users.tests.test_app', ] INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS MIDDLEWARE_CLASSES = [ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ] SECRET_KEY = 'foobar' ```
[ { "content": "Write the code verbatim:\n```python\nimport json\nimport jwt\nimport logging\nimport os\nimport uuid\nimport signal\nimport sys\nimport traceback\n\nfrom tornado import ioloop, web, auth, httpserver, gen, escape, log\nfrom tornado.options import options, parse_command_line, define\n\nfrom problem ...
[ { "content": "Write the code verbatim:\n<|memory_start|>```python\nimport json\nimport jwt\nimport logging\nimport os\nimport uuid\nimport signal\nimport sys\nimport traceback\n\nfrom tornado import ioloop, web, auth, httpserver, gen, escape, log\nfrom tornado.options import options, parse_command_line, define\...
```python import json import jwt import logging import os import uuid import signal import sys import traceback from tornado import ioloop, web, auth, httpserver, gen, escape, log from tornado.options import options, parse_command_line, define from problem import Problem from contest import Contest from judge import Judge from data_uri import DataURI SRC_DIR = os.path.dirname(__file__) define('redirect_url', help='Google OAuth2 Redirect URL', type=str) define('client_id', help='Google OAuth2 Client ID', type=str) define('client_secret', help='Google OAuth2 Client Secret', type=str) define('admin_whitelist', help='emails of admins', type=str, multiple=True) define('guest_whitelist', help='emails of guests', type=str, multiple=True) define('port', default=8000, help='start on the given port', type=int) define('contest_dir', default='contest', help='path to the contest files', type=str) define('delay', default=15*60, help='delay (in seconds) before starting the contest', type=int) define('admin_only', default=False, help='only allow admins to view site', type=bool) define('minutes_in', default=0, help='minutes into the contest', type=int) define('num_judges', default=4, help='number of judge threads', type=int) class BaseHandler(web.RequestHandler): def get_current_user(self): user_json = self.get_secure_cookie('utacm_contest_user', max_age_days=1) return escape.json_decode(user_json) if user_json else None def current_user_id(self): cookie = self.current_user return (cookie['email'], cookie['name'], cookie['sub']) def current_user_pretty(self): return "%s (%s)" % self.current_user_id()[:2] def is_admin(self): return self.current_user_id()[0] in options.admin_whitelist def is_guest(self): return self.current_user_id()[0] in options.guest_whitelist class AuthLoginHandler(BaseHandler, auth.GoogleOAuth2Mixin): @gen.coroutine def get(self): if self.get_argument('code', False): user = yield self.get_authenticated_user( redirect_uri=self.settings['google_redirect_url'], 
code=self.get_argument('code')) if not user: self.clear_all_cookies() raise web.HTTPError(500, 'Google authentication failed') self.xsrf_token access_token = str(user['access_token']) user_info = jwt.decode(user['id_token'], verify=False) http_client = self.get_auth_http_client() response = yield http_client.fetch('https://www.googleapis.com/oauth2/v1/userinfo?access_token='+access_token) if not response: self.clear_all_cookies() raise web.HTTPError(500, 'Google authentication failed') user = json.loads(response.body) if options.admin_only and user['email'] not in options.admin_whitelist: logger.warn("%s (%s) attempted to sign in (admin only mode)" % (user["name"], user["email"])) raise web.HTTPError(403, 'Contest is running in admin only mode.') user['sub'] = user_info['sub'] self.set_secure_cookie('utacm_contest_user', escape.json_encode(user), expires_days=1) logger.info("%s (%s) signed in" % (user["name"], user["email"])) self.redirect('/') return elif self.get_secure_cookie('utacm_contest_user'): self.redirect('/') return else: yield self.authorize_redirect( redirect_uri=self.settings['google_redirect_url'], client_id=self.settings['google_oauth']['key'], scope=['profile', 'email'], response_type='code', extra_params={'approval_prompt': 'auto'}) class AuthLogoutHandler(BaseHandler): def get(self): self.clear_cookie('utacm_contest_user') self.write("You are now logged out.") class IndexHandler(BaseHandler): @web.authenticated def get(self): if contest.is_running() or contest.is_over(): self.render('contest.html', admin=self.is_admin()) else: self.render('pre-contest.html', admin=self.is_admin()) class MetadataHandler(BaseHandler): @web.authenticated def get(self): if not contest.is_running() and not contest.is_over(): raise web.HTTPError(503) data = { 'langs': Judge.lang_run.keys(), 'prob_ids': contest_cfg['prob_ids'], 'prob_contents': {prob_id: problems[prob_id].content for prob_id in contest_cfg['prob_ids']}, 'verdicts': Contest.verdicts, 'solved': 
contest.get_solved(self.current_user_id()) } self.set_header('Content-Type', 'application/json') self.write(json.dumps(data)) class UpdatesHandler(BaseHandler): @web.authenticated def get(self): updates = {'remaining_time': contest.remaining_time()} scoreboard = [list(person) for person in contest.get_scoreboard(live=self.is_admin())] for person in scoreboard: if person[0][0] in options.guest_whitelist: person[0] = list(person[0]) person[0][1] = '%s %s' % (person[0][1], '(Guest)') if contest.is_running() or contest.is_over(): updates['scoreboard'] = scoreboard updates['solved'] = contest.get_solved(self.current_user_id()) updates['submissions'] = contest.get_submissions(self.current_user_id(), is_admin=self.is_admin()) updates['clarifications'] = contest.get_clarifs(self.current_user_id(), is_admin=self.is_admin()) self.set_header('Content-Type', 'application/json') self.write(json.dumps(updates)) class SubmitSolutionHandler(BaseHandler): @web.authenticated def post(self, prob_id): # Requests a solution be graded # Body should contain: source code, output # Verify valid prob_id # Check permit, return error if expired # Dispatch to judge, return True or False based on accepted or not if not contest.is_running(): raise web.HTTPError(503) if prob_id not in contest_cfg['prob_ids']: raise web.HTTPError(404) user_id = self.current_user_id() try: lang = self.get_argument('lang')[:32] filename = self.get_argument('filename')[:32] source = DataURI(self.get_argument('sourceFile')).data except: raise web.HTTPError(400) logger.info('%s requests judgement for a submission (%s, %s, %s)' % (self.current_user_pretty(), filename, lang, prob_id)) result, err = judge.enqueue_submission(user_id, prob_id, lang, filename, source) logger.info('Submission successfully added to judge queue' if result else 'Failed to add to judge queue') self.set_header('Content-Type', 'application/json') self.write(json.dumps((result, err))) class SubmitClarificationHandler(BaseHandler): @web.authenticated 
def post(self, prob_id): if not contest.is_running(): raise web.HTTPError(503) if prob_id not in contest_cfg['prob_ids']: raise web.HTTPError(404) user_id = self.current_user_id() message = self.get_argument('content').strip() if not message: raise web.HTTPError(400) logger.info('%s requests clarification for problem %s' % (self.current_user_pretty(), prob_id)) logger.debug('Clarification: ' + message) contest.submit_clarif(user_id, prob_id, message) self.set_header('Content-Type', 'application/json') self.write(json.dumps(True)) class OutputFileHandler(BaseHandler): @web.authenticated def get(self, value): if not self.is_admin(): raise web.HTTPError(404) try: subm_id = int(value) except: raise web.HTTPError(400) output_log = contest.get_output_log(subm_id) if not output_log: raise web.HTTPError(404) self.set_header('Content-Type', 'text/html') self.write('<pre>') self.write(escape.xhtml_escape(str(output_log))) self.write('</pre>') class ErrorFileHandler(BaseHandler): @web.authenticated def get(self, value): user_id = self.current_user_id() try: subm_id = int(value) except: raise web.HTTPError(400) error_log = contest.get_error_log(user_id, subm_id, is_admin=self.is_admin()) if not error_log: raise web.HTTPError(404) self.set_header('Content-Type', 'text/html') self.write('<pre>') self.write(escape.xhtml_escape(str(error_log))) self.write('</pre>') class LogHandler(BaseHandler): @web.authenticated def get(self, value): if not self.is_admin(): raise web.HTTPError(404) self.set_header('Content-Type', 'text/html') self.write("<pre>") try: server_log_path = os.path.join(SRC_DIR, "server_log.txt") with open(server_log_path, 'r') as in_file: lines = [line.decode('utf-8') for line in in_file.readlines()] lines = [line for line in lines if all([v in line for v in value.split('/')])] self.write(escape.xhtml_escape(''.join(lines))) except: logger.error("unable to read log: %s" % (traceback.format_exception(*sys.exc_info()),)) self.write("unable to read log") 
self.write("</pre>") class AdminHandler(BaseHandler): @web.authenticated def get(self, value): if not self.is_admin(): raise web.HTTPError(404) if value != 'updates': raise web.HTTPError(400) updates = {'frozen': contest.is_frozen(), 'guest_whitelist': options.guest_whitelist, 'whitelist': options.admin_whitelist, 'clarifs': contest.get_clarifs(-1)} self.set_header('Content-Type', 'application/json') self.write(json.dumps(updates)) @web.authenticated def post(self, put_type): if not self.is_admin(): raise web.HTTPError(404) if put_type == 'rejudge': self.rejudge() elif put_type == 'clear': self.clear_cache() elif put_type == 'frozen': self.change_state() elif put_type == 'guest_whitelist': self.add_to_guest_whitelist() elif put_type == 'whitelist': self.add_to_whitelist() elif put_type == 'add_time': self.add_time() elif put_type == 'clarifications': self.respond_to_clarification() elif put_type == 'clarification': self.post_global_clarification() elif put_type == 'override': self.override_result() else: raise web.HTTPError(400) def rejudge(self): for prob in problems.values(): prob.reload_files() judge.rejudge_all() self.write(json.dumps(True)) def clear_cache(self): for prob in problems.values(): prob.reload_files() self.write(json.dumps(True)) def change_state(self): new_state = '' try: new_state = self.get_argument('state') except Exception: raise web.HTTPError(400) if new_state == 'unfreeze': contest.freeze_scoreboard(False) else: contest.freeze_scoreboard(True) self.write(json.dumps(True)) def override_result(self): try: subm_id = int(self.get_argument('subm_id')) result = self.get_argument('result') if result in Contest.verdicts: contest.change_submission(subm_id, result) self.write(json.dumps(True)) except: raise web.HTTPError(400) def add_to_whitelist(self): newAdmin = '' try: new_admin = self.get_argument('newAdmin') except Exception: raise web.HTTPError(400) if new_admin not in options.admin_whitelist: options.admin_whitelist.append(new_admin) 
self.write(json.dumps(True)) def add_to_guest_whitelist(self): newGuest = '' try: new_guest = self.get_argument('newGuest') except Exception: raise web.HTTPError(400) if new_guest not in options.guest_whitelist: options.guest_whitelist.append(new_guest) self.write(json.dumps(True)) def add_time(self): num_min = 0; try: num_min = int(self.get_argument('numMin')) except Exception: raise web.HTTPError(400) contest.extend(num_min * 60) self.write(json.dumps(True)) def respond_to_clarification(self): option = 0; clarif_id = '' try: option = int(self.get_argument('respNum')) clarif_id = int(self.get_argument('clarifNum')) except Exception: raise web.HTTPError(400) if option == 0: contest.respond_clarif(clarif_id, 'Reread the problem statement.') self.write(json.dumps(True)) elif option == 1: contest.respond_clarif(clarif_id, 'Come talk to the administrators.') self.write(json.dumps(True)) elif option == 2: resp_string = '' try: resp_string = self.get_argument('response') except Exception: raise web.HTTPError(400) contest.respond_clarif(clarif_id, resp_string, False) self.write(json.dumps(True)) else: raise web.HTTPError(400) def post_global_clarification(self): prob_id = 0; resp_string = '' try: prob_id = self.get_argument('probId') resp_string = self.get_argument('response') except Exception: raise web.HTTPError(400) contest.create_global_clarif(prob_id, resp_string) self.write(json.dumps(True)) def init_loggers(): access_log_path = os.path.join(SRC_DIR, "access_log.txt") handler_access = logging.FileHandler(access_log_path) handler_access.setFormatter(log.LogFormatter()) logging.getLogger('tornado.access').addHandler(handler_access) server_log_path = os.path.join(SRC_DIR, "server_log.txt") handler_server = logging.FileHandler(server_log_path) handler_server.setFormatter(log.LogFormatter()) logger.addHandler(handler_server) logger.setLevel(logging.DEBUG) logger.info("Starting up server") if __name__ == '__main__': parse_command_line() logger = 
logging.getLogger(__name__) init_loggers() logger.info("Loading contest configuration") with open(os.path.join(options.contest_dir, 'config.txt'), 'r') as in_file: contest_cfg = eval(in_file.read()) seconds = contest_cfg['duration'] % 60 minutes = contest_cfg['duration'] / 60 % 60 hours = contest_cfg['duration'] / 60 / 60 % 60 logger.debug("Duration: %02d:%02d:%02d" % (hours, minutes, seconds)) logger.debug("Problems: " + str(contest_cfg['prob_ids'])) logger.debug("Penalty: %d points / wrong submission" % contest_cfg['penalty']) problems = {prob_id: Problem(prob_id, options.contest_dir, logger) for prob_id in contest_cfg['prob_ids']} contest = Contest(options.delay, contest_cfg['duration'], options.minutes_in, contest_cfg['prob_ids'], contest_cfg['penalty'], logger) judge = Judge(contest, problems, options.contest_dir, options.num_judges, logger) application = web.Application( [ (r'/', IndexHandler), (r'/index.html', IndexHandler), (r'/auth/login', AuthLoginHandler), (r'/auth/logout', AuthLogoutHandler), (r'/api/v1/admin/(.*)', AdminHandler), (r'/api/v1/log/(.*)', LogHandler), (r'/api/v1/metadata', MetadataHandler), (r'/api/v1/updates', UpdatesHandler), (r'/api/v1/submit/(.*)/solution', SubmitSolutionHandler), (r'/api/v1/submit/(.*)/clarification', SubmitClarificationHandler), (r'/api/v1/outputs/(.*)', OutputFileHandler), (r'/api/v1/errors/(.*)', ErrorFileHandler), ], cookie_secret=str(uuid.uuid4()), login_url='/auth/login', template_path=os.path.join(os.path.dirname(__file__), 'templates'), static_path=os.path.join(os.path.dirname(__file__), 'static'), xsrf_cookies=True, debug=False, google_redirect_url=options.redirect_url, google_oauth={'key': options.client_id, 'secret': options.client_secret}, compiled_template_cache=False, ) application.listen( port=options.port, max_buffer_size=40*1024, ) signal.signal(signal.SIGUSR1, lambda x, y: judge.halt_judging()) logger.info("Setup complete, starting IOLoop") try: ioloop.IOLoop.instance().start() except 
KeyboardInterrupt: logger.info("Server halted by ^C, shutting down judger") except Exception as e: logger.critical("Server crashed: %s" % e.message) finally: judge.halt_judging() ```
[ { "content": "Recreate the entire code block with identical formatting:\n```python\n'''\r\nStrava club information is not publicly available. To get the club information\r\n the club owner needs to be logged in. This script takes the HTML file of the\r\n strava club page and extracts all the users/athletes.\r\n...
[ { "content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n'''\r\nStrava club information is not publicly available. To get the club information\r\n the club owner needs to be logged in. This script takes the HTML file of the\r\n strava club page and extracts all the use...
```python ''' Strava club information is not publicly available. To get the club information the club owner needs to be logged in. This script takes the HTML file of the strava club page and extracts all the users/athletes. ''' from bs4 import BeautifulSoup import argparse def get_group_members_from_html(html_in): with open(html_in, 'rb') as fin: soup = BeautifulSoup(fin) scripts = soup.find_all('script') for jscript in scripts: text = jscript.get_text() if 'members:' in text: junk, tail = text.split('members:') last_char = tail.find(']]') first_char = tail.find('[[') member_list = tail[first_char:last_char+2] mem_array_raw = eval(member_list) mem_array = [] for member in mem_array_raw: mem_array.append([member[0], member[1].decode('unicode_escape').encode('utf8')]) return mem_array return [] if __name__ == "__main__": parser = argparse.ArgumentParser(description='Get the year to date totals for a list of users in strava.') parser.add_argument('--html', dest='html_file', required=True, help='A saved copy of the group strava page when logged in.') parser.add_argument('-o', '--out', dest='out_file', default='club_members.csv', help='Output CSV file name, default value is club_members.csv') args = parser.parse_args() html_file = args.html_file out_file = args.out_file with open (out_file, 'wb') as fout: members = get_group_members_from_html(html_file) for member in members: line = str(member[0]) + ',' + member[1] + '\n' fout.write(line) with open (out_file, 'r') as f: for line in f: print line.strip() ```
[ { "content": "```python\n#!/usr/bin/env python\n#************************************************************************\n# Codelet Tuning Infrastructure\n# Copyright (C) 2010-2015 Intel Corporation, CEA, GENCI, and UVSQ\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the...
[ { "content": "<|memory_start|>```python\n#!/usr/bin/env python\n#************************************************************************\n# Codelet Tuning Infrastructure\n# Copyright (C) 2010-2015 Intel Corporation, CEA, GENCI, and UVSQ\n#\n# This program is free software: you can redistribute it and/or modify...
```python #!/usr/bin/env python #************************************************************************ # Codelet Tuning Infrastructure # Copyright (C) 2010-2015 Intel Corporation, CEA, GENCI, and UVSQ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #************************************************************************ # Authors: Franck Talbart, Mathieu Bordet, Nicolas Petit # This script generates the dynamic MAQAO CQA schema import cti, ctr from cti_hapi import entry, util import json, sys, os, subprocess uid = cti.CTI_UID(sys.argv[1]) path_plugin = ctr.ctr_plugin_get_path_by_uid(cti.CTR_ENTRY_PLUGIN, uid) #--------------------------------------------------------------------------- def update_file(filename, cmd, result): infile, outfile = entry.load_defaults(uid) if not outfile["init"].params.has_key(result["name"]): file_input = open(filename) ji = json.load(file_input) with open(filename, 'w') as outfile: ji[cmd]["params"].insert(4, result) json.dump(ji, outfile, indent=4) #--------------------------------------------------------------------------- maqao_bin = os.path.join(cti.cti_plugin_config_get_value(cti.THIRD_PARTY_DIR), "maqao", "maqao") sys.stdout.flush() child = subprocess.Popen([maqao_bin, "cqa", "-cti", "-ext"], shell=False) output = child.communicate() rc = child.returncode if rc != 0: raise Exception("Error with MAQAO CQA while getting the JSON file") file_js = 
open("x86.json") jr = json.load(file_js) result = { cti.META_ATTRIBUTE_NAME: "matrix_loop_results", cti.META_ATTRIBUTE_TYPE: "MATRIX", cti.META_ATTRIBUTE_DESC: "MAQAO CQA loop results.", cti.META_ATTRIBUTE_LONG_DESC: "The MAQAO CQA loop results from the associated compilation.", cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES: [], cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES: [], cti.META_ATTRIBUTE_MATRIX_COLUMN_DESCS: [], cti.META_ATTRIBUTE_MATRIX_COLUMN_LONG_DESCS: [], cti.META_ATTRIBUTE_OPTIONAL: True, cti.META_ATTRIBUTE_MATRIX_GENERATED: True } for archi_k, archi_v in jr.items(): #Moving the metrics into a list in order to sort them metric_list = [] for metric_k, metric_v in archi_v.items(): metric_list.append({ cti.META_ATTRIBUTE_NAME: metric_k, cti.META_ATTRIBUTE_TYPE: metric_v["type"], cti.META_ATTRIBUTE_DESC: metric_v["description"] }) sorted_metrics = sorted(metric_list, key=lambda k: k[cti.META_ATTRIBUTE_NAME]) for m in sorted_metrics: param = util.column_name_replace(m[cti.META_ATTRIBUTE_NAME].strip()) result[cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES].append("%s_%s" % (archi_k, param)) result[cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES].append(m[cti.META_ATTRIBUTE_TYPE]) result[cti.META_ATTRIBUTE_MATRIX_COLUMN_DESCS].append(m[cti.META_ATTRIBUTE_NAME]) result[cti.META_ATTRIBUTE_MATRIX_COLUMN_LONG_DESCS].append(m[cti.META_ATTRIBUTE_DESC]) if archi_k == "common": #Adding port balancing column result[cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES].append("%s_port_balancing" % archi_k) result[cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES].append("TEXT") result[cti.META_ATTRIBUTE_MATRIX_COLUMN_DESCS].append("The port balancing") result[cti.META_ATTRIBUTE_MATRIX_COLUMN_LONG_DESCS].append("The port balancing.") #Adding multiple path column result[cti.META_ATTRIBUTE_MATRIX_COLUMN_NAMES].append("%s_is_multiple_path" % archi_k) result[cti.META_ATTRIBUTE_MATRIX_COLUMN_TYPES].append("BOOLEAN") result[cti.META_ATTRIBUTE_MATRIX_COLUMN_DESCS].append("Is multiple path") 
result[cti.META_ATTRIBUTE_MATRIX_COLUMN_LONG_DESCS].append("True if the loop has multiple path.") update_file(os.path.join(path_plugin, cti.cti_plugin_config_get_value(cti.PLUGIN_DEFAULT_INPUT_FILENAME)), "update", result) update_file(os.path.join(path_plugin, cti.cti_plugin_config_get_value(cti.PLUGIN_DEFAULT_OUTPUT_FILENAME)), "init", result) os.remove("x86.json") ```
[ { "content": "Here is the code block:\n```python\n# Copyright (c) 2012 OpenStack Foundation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\...
[ { "content": "Here is the code block:\n<|memory_start|>```python\n# Copyright (c) 2012 OpenStack Foundation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of th...
```python # Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from neutron_lib.api import converters from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_policy import policy as oslo_policy from oslo_utils import uuidutils import six import six.moves.urllib.parse as urlparse import webob from webob import exc import webtest from neutron.api import api_common from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import base as v2_base from neutron.api.v2 import router from neutron.callbacks import registry from neutron import context from neutron import manager from neutron import policy from neutron import quota from neutron.quota import resource_registry from neutron.tests import base from neutron.tests import fake_notifier from neutron.tests import tools from neutron.tests.unit import testlib_api EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions') _uuid = uuidutils.generate_uuid def _get_path(resource, id=None, action=None, fmt=None): path = '/%s' % resource if id is not None: path = path + '/%s' % id if action is not None: path = path + '/%s' % action if fmt is not None: path = path + '.%s' % fmt return path class ResourceIndexTestCase(base.BaseTestCase): def test_index_json(self): index = webtest.TestApp(router.Index({'foo': 'bar'})) res = 
index.get('') self.assertIn('resources', res.json) self.assertEqual(1, len(res.json['resources'])) resource = res.json['resources'][0] self.assertIn('collection', resource) self.assertEqual('bar', resource['collection']) self.assertIn('name', resource) self.assertEqual('foo', resource['name']) self.assertIn('links', resource) self.assertEqual(1, len(resource['links'])) link = resource['links'][0] self.assertIn('href', link) self.assertEqual(link['href'], 'http://localhost/bar') self.assertIn('rel', link) self.assertEqual('self', link['rel']) class APIv2TestBase(base.BaseTestCase): def setUp(self): super(APIv2TestBase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None # Create the default configurations self.config_parse() # Update the plugin self.setup_coreplugin(plugin) cfg.CONF.set_override('allow_pagination', True) cfg.CONF.set_override('allow_sorting', True) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = True instance._NeutronPluginBaseV2__native_sorting_support = True api = router.APIRouter() self.api = webtest.TestApp(api) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') # APIRouter initialization resets policy module, re-initializing it policy.init() class _ArgMatcher(object): """An adapter to assist mock assertions, used to custom compare.""" def __init__(self, cmp, obj): self.cmp = cmp self.obj = obj def __eq__(self, other): return self.cmp(self.obj, other) def _list_cmp(l1, l2): return set(l1) == set(l2) class APIv2TestCase(APIv2TestBase): def _do_field_list(self, resource, base_fields): attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource] policy_attrs = [name for (name, info) in attr_info.items() if 
info.get('required_by_policy')] for name, info in attr_info.items(): if info.get('primary_key'): policy_attrs.append(name) fields = base_fields fields.extend(policy_attrs) return fields def _get_collection_kwargs(self, skipargs=None, **kwargs): skipargs = skipargs or [] args_list = ['filters', 'fields', 'sorts', 'limit', 'marker', 'page_reverse'] args_dict = dict( (arg, mock.ANY) for arg in set(args_list) - set(skipargs)) args_dict.update(kwargs) return args_dict def test_fields(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': 'foo'}) fields = self._do_field_list('networks', ['foo']) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple(self): instance = self.plugin.return_value instance.get_networks.return_value = [] fields = self._do_field_list('networks', ['foo', 'bar']) self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']}) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple_with_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] fields = self._do_field_list('networks', ['foo']) self.api.get(_get_path('networks'), {'fields': ['foo', '']}) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': ''}) kwargs = self._get_collection_kwargs(fields=[]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': ['', '']}) kwargs = self._get_collection_kwargs(fields=[]) 
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar'}) filters = {'name': ['bar']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ''}) filters = {} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['', '']}) filters = {} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_with_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['bar', '']}) filters = {'name': ['bar']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_values(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']}) filters = {'name': ['bar', 'bar2']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar', 'tenant_id': 'bar2'}) filters = {'name': ['bar'], 'tenant_id': ['bar2']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def 
test_filters_with_fields(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'}) filters = {'name': ['bar']} fields = self._do_field_list('networks', ['foo']) kwargs = self._get_collection_kwargs(filters=filters, fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_convert_to(self): instance = self.plugin.return_value instance.get_ports.return_value = [] self.api.get(_get_path('ports'), {'admin_state_up': 'true'}) filters = {'admin_state_up': [True]} kwargs = self._get_collection_kwargs(filters=filters) instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_convert_list_to(self): instance = self.plugin.return_value instance.get_ports.return_value = [] self.api.get(_get_path('ports'), {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']}) filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}} kwargs = self._get_collection_kwargs(filters=filters) instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) def test_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '10'}) kwargs = self._get_collection_kwargs(limit=10) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_great_than_max_limit(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '1001'}) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_zero(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '0'}) kwargs = self._get_collection_kwargs(limit=1000) 
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_unspecific(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_negative_value(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'limit': -1}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_limit_with_non_integer(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'limit': 'abc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_limit_with_infinite_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_override('pagination_max_limit', 'Infinite') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_negative_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_default('pagination_max_limit', '-1') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_non_integer_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_default('pagination_max_limit', 'abc') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_marker(self): 
cfg.CONF.set_override('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] marker = _uuid() self.api.get(_get_path('networks'), {'marker': marker}) kwargs = self._get_collection_kwargs(limit=1000, marker=marker) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse(self): calls = [] instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'page_reverse': 'True'}) kwargs = self._get_collection_kwargs(page_reverse=True) calls.append(mock.call.get_networks(mock.ANY, **kwargs)) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) instance.get_networks.reset_mock() self.api.get(_get_path('networks'), {'page_reverse': 'False'}) kwargs = self._get_collection_kwargs(page_reverse=False) calls.append(mock.call.get_networks(mock.ANY, **kwargs)) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse_with_non_bool(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'page_reverse': 'abc'}) kwargs = self._get_collection_kwargs(page_reverse=False) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse_with_unspecific(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(page_reverse=False) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', True)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort_with_primary_key(self): instance = self.plugin.return_value 
instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up', 'id'], 'sort_dir': ['desc', 'asc', 'desc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', False)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort_without_direction(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': ['name']}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_sort_with_invalid_attribute(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': 'abc', 'sort_dir': 'asc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_sort_with_invalid_dirs(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': 'name', 'sort_dir': 'abc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_emulated_sort(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance._NeutronPluginBaseV2__native_sorting_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'sort_key': ['name', 'status'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs( skipargs=['sorts', 'limit', 'marker', 'page_reverse']) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_emulated_sort_without_sort_field(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance._NeutronPluginBaseV2__native_sorting_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), 
{'sort_key': ['name', 'status'], 'sort_dir': ['desc', 'asc'], 'fields': ['subnets']}) kwargs = self._get_collection_kwargs( skipargs=['sorts', 'limit', 'marker', 'page_reverse'], fields=_ArgMatcher(_list_cmp, ['name', 'status', 'id', 'subnets', 'shared', 'tenant_id'])) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_emulated_pagination(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'limit': 10, 'marker': 'foo', 'page_reverse': False}) kwargs = self._get_collection_kwargs(skipargs=['limit', 'marker', 'page_reverse']) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_native_pagination_without_native_sorting(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_sorting_support = False self.assertRaises(n_exc.Invalid, router.APIRouter) def test_native_pagination_without_allow_sorting(self): cfg.CONF.set_override('allow_sorting', False) instance = self.plugin.return_value instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', True)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) # Note: since all resources use the same controller and validation # logic, we actually get really good coverage from testing just networks. 
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): def _test_list(self, req_tenant_id, real_tenant_id): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} input_dict = {'id': uuidutils.generate_uuid(), 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': real_tenant_id, 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value res = self.api.get(_get_path('networks', fmt=self.fmt), extra_environ=env) res = self.deserialize(res) self.assertIn('networks', res) if not req_tenant_id or req_tenant_id == real_tenant_id: # expect full list returned self.assertEqual(1, len(res['networks'])) output_dict = res['networks'][0] input_dict['shared'] = False self.assertEqual(len(input_dict), len(output_dict)) for k, v in six.iteritems(input_dict): self.assertEqual(v, output_dict[k]) else: # expect no results self.assertEqual(0, len(res['networks'])) def test_list_noauth(self): self._test_list(None, _uuid()) def test_list_keystone(self): tenant_id = _uuid() self._test_list(tenant_id, tenant_id) def test_list_keystone_bad(self): tenant_id = _uuid() self._test_list(tenant_id + "bad", tenant_id) def test_list_pagination(self): id1 = str(_uuid()) id2 = str(_uuid()) input_dict1 = {'id': id1, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} input_dict2 = {'id': id2, 'name': 'net2', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict1, input_dict2] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'sort_key': ['name'], 'sort_dir': ['asc']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(2, len(res['networks'])) self.assertEqual(sorted([id1, id2]), sorted([res['networks'][0]['id'], 
res['networks'][1]['id']])) self.assertIn('networks_links', res) next_links = [] previous_links = [] for r in res['networks_links']: if r['rel'] == 'next': next_links.append(r) if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(next_links)) self.assertEqual(1, len(previous_links)) url = urlparse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) params['marker'] = [id2] self.assertEqual(params, urlparse.parse_qs(url.query)) url = urlparse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) params['marker'] = [id1] params['page_reverse'] = ['True'] self.assertEqual(params, urlparse.parse_qs(url.query)) def test_list_pagination_with_last_page(self): id = str(_uuid()) input_dict = {'id': id, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': str(_uuid())} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(1, len(res['networks'])) self.assertEqual(id, res['networks'][0]['id']) self.assertIn('networks_links', res) previous_links = [] for r in res['networks_links']: self.assertNotEqual(r['rel'], 'next') if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(previous_links)) url = urlparse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() expect_params['marker'] = [id] expect_params['page_reverse'] = ['True'] self.assertEqual(expect_params, urlparse.parse_qs(url.query)) def test_list_pagination_with_empty_page(self): return_value = [] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': str(_uuid())} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual([], res['networks']) 
previous_links = [] if 'networks_links' in res: for r in res['networks_links']: self.assertNotEqual(r['rel'], 'next') if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(previous_links)) url = urlparse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() del expect_params['marker'] expect_params['page_reverse'] = ['True'] self.assertEqual(expect_params, urlparse.parse_qs(url.query)) def test_list_pagination_reverse_with_last_page(self): id = str(_uuid()) input_dict = {'id': id, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'page_reverse': ['True']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(len(res['networks']), 1) self.assertEqual(id, res['networks'][0]['id']) self.assertIn('networks_links', res) next_links = [] for r in res['networks_links']: self.assertNotEqual(r['rel'], 'previous') if r['rel'] == 'next': next_links.append(r) self.assertEqual(1, len(next_links)) url = urlparse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expected_params = params.copy() del expected_params['page_reverse'] expected_params['marker'] = [id] self.assertEqual(expected_params, urlparse.parse_qs(url.query)) def test_list_pagination_reverse_with_empty_page(self): return_value = [] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'page_reverse': ['True']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual([], res['networks']) next_links = [] if 'networks_links' in res: for r in res['networks_links']: self.assertNotEqual(r['rel'], 'previous') if r['rel'] == 'next': next_links.append(r) 
self.assertEqual(1, len(next_links)) url = urlparse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() del expect_params['marker'] del expect_params['page_reverse'] self.assertEqual(expect_params, urlparse.parse_qs(url.query)) def test_create(self): net_id = _uuid() data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) def test_create_use_defaults(self): net_id = _uuid() initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = {'network': {'admin_state_up': True, 'shared': False}} full_input['network'].update(initial_input['network']) return_value = {'id': net_id, 'status': "ACTIVE"} return_value.update(full_input['network']) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt) instance.create_network.assert_called_with(mock.ANY, network=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertTrue(net['admin_state_up']) self.assertEqual("ACTIVE", net['status']) def test_create_no_keystone_env(self): data = {'name': 'net1'} 
self._test_create_failure_bad_request('networks', data) def test_create_with_keystone_env(self): tenant_id = _uuid() net_id = _uuid() env = {'neutron.context': context.Context('', tenant_id)} # tenant_id should be fetched from env initial_input = {'network': {'name': 'net1'}} full_input = {'network': {'admin_state_up': True, 'shared': False, 'tenant_id': tenant_id}} full_input['network'].update(initial_input['network']) return_value = {'id': net_id, 'status': "ACTIVE"} return_value.update(full_input['network']) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt, extra_environ=env) instance.create_network.assert_called_with(mock.ANY, network=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) def test_create_bad_keystone_tenant(self): tenant_id = _uuid() data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} env = {'neutron.context': context.Context('', tenant_id + "bad")} self._test_create_failure_bad_request('networks', data, extra_environ=env) def test_create_no_body(self): data = {'whoa': None} self._test_create_failure_bad_request('networks', data) def test_create_body_string_not_json(self): data = 'a string' self._test_create_failure_bad_request('networks', data) def test_create_body_boolean_not_json(self): data = True self._test_create_failure_bad_request('networks', data) def test_create_no_resource(self): data = {} self._test_create_failure_bad_request('networks', data) def test_create_missing_attr(self): data = {'port': {'what': 'who', 'tenant_id': _uuid()}} self._test_create_failure_bad_request('ports', data) def test_create_readonly_attr(self): data = {'network': {'name': 'net1', 'tenant_id': _uuid(), 'status': "ACTIVE"}} self._test_create_failure_bad_request('networks', data) def test_create_with_too_long_name(self): data = 
{'network': {'name': "12345678" * 32, 'admin_state_up': True, 'tenant_id': _uuid()}} res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_bulk(self): data = {'networks': [{'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}, {'name': 'net2', 'admin_state_up': True, 'tenant_id': _uuid()}]} def side_effect(context, network): net = network.copy() net['network'].update({'subnets': []}) return net['network'] instance = self.plugin.return_value instance.create_network.side_effect = side_effect instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) def _test_create_failure_bad_request(self, resource, data, **kwargs): res = self.api.post(_get_path(resource, fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True, **kwargs) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_bulk_networks_none(self): self._test_create_failure_bad_request('networks', {'networks': None}) def test_create_bulk_networks_empty_list(self): self._test_create_failure_bad_request('networks', {'networks': []}) def test_create_bulk_missing_attr(self): data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]} self._test_create_failure_bad_request('ports', data) def test_create_bulk_partial_body(self): data = {'ports': [{'device_id': 'device_1', 'tenant_id': _uuid()}, {'tenant_id': _uuid()}]} self._test_create_failure_bad_request('ports', data) def test_create_attr_not_specified(self): net_id = _uuid() tenant_id = _uuid() device_id = _uuid() initial_input = {'port': {'name': '', 'network_id': net_id, 'tenant_id': tenant_id, 'device_id': device_id, 'admin_state_up': True}} full_input = {'port': {'admin_state_up': 
True, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'device_owner': ''}} full_input['port'].update(initial_input['port']) return_value = {'id': _uuid(), 'status': 'ACTIVE', 'admin_state_up': True, 'mac_address': 'ca:fe:de:ad:be:ef', 'device_id': device_id, 'device_owner': ''} return_value.update(initial_input['port']) instance = self.plugin.return_value instance.get_network.return_value = { 'tenant_id': six.text_type(tenant_id) } instance.get_ports_count.return_value = 1 instance.create_port.return_value = return_value res = self.api.post(_get_path('ports', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt) instance.create_port.assert_called_with(mock.ANY, port=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('port', res) port = res['port'] self.assertEqual(net_id, port['network_id']) self.assertEqual('ca:fe:de:ad:be:ef', port['mac_address']) def test_create_return_extra_attr(self): net_id = _uuid() data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id, 'v2attrs:something': "123"} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) self.assertNotIn('v2attrs:something', net) def test_fields(self): return_value = {'name': 'net1', 'admin_state_up': True, 'subnets': []} instance = self.plugin.return_value instance.get_network.return_value = return_value self.api.get(_get_path('networks', 
id=uuidutils.generate_uuid(), fmt=self.fmt)) def _test_delete(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': real_tenant_id, 'shared': False} instance.delete_network.return_value = None res = self.api.delete(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), extra_environ=env, expect_errors=expect_errors) self.assertEqual(expected_code, res.status_int) def test_delete_noauth(self): self._test_delete(None, _uuid(), exc.HTTPNoContent.code) def test_delete_keystone(self): tenant_id = _uuid() self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code) def test_delete_keystone_bad_tenant(self): tenant_id = _uuid() self._test_delete(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def _test_get(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} shared = False if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} if req_tenant_id.endswith('another'): shared = True env['neutron.context'].roles = ['tenant_admin'] data = {'tenant_id': real_tenant_id, 'shared': shared} instance = self.plugin.return_value instance.get_network.return_value = data res = self.api.get(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), extra_environ=env, expect_errors=expect_errors) self.assertEqual(expected_code, res.status_int) return res def test_get_noauth(self): self._test_get(None, _uuid(), 200) def test_get_keystone(self): tenant_id = _uuid() self._test_get(tenant_id, tenant_id, 200) def test_get_keystone_bad_tenant(self): tenant_id = _uuid() self._test_get(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def test_get_keystone_shared_network(self): tenant_id = _uuid() self._test_get(tenant_id + "another", tenant_id, 200) def 
test_get_keystone_strip_admin_only_attribute(self): tenant_id = _uuid() # Inject rule in policy engine rules = oslo_policy.Rules.from_dict( {'get_network:name': "rule:admin_only"}) policy.set_rules(rules, overwrite=False) res = self._test_get(tenant_id, tenant_id, 200) res = self.deserialize(res) self.assertNotIn('name', res['network']) def _test_update(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} # leave out 'name' field intentionally data = {'network': {'admin_state_up': True}} return_value = {'subnets': []} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': real_tenant_id, 'shared': False} instance.update_network.return_value = return_value res = self.api.put(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), self.serialize(data), extra_environ=env, expect_errors=expect_errors) # Ensure id attribute is included in fields returned by GET call # in update procedure. 
self.assertEqual(1, instance.get_network.call_count) self.assertIn('id', instance.get_network.call_args[1]['fields']) self.assertEqual(res.status_int, expected_code) def test_update_noauth(self): self._test_update(None, _uuid(), 200) def test_update_keystone(self): tenant_id = _uuid() self._test_update(tenant_id, tenant_id, 200) def test_update_keystone_bad_tenant(self): tenant_id = _uuid() self._test_update(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def test_update_keystone_no_tenant(self): tenant_id = _uuid() self._test_update(tenant_id, None, exc.HTTPNotFound.code, expect_errors=True) def test_update_readonly_field(self): data = {'network': {'status': "NANANA"}} res = self.api.put(_get_path('networks', id=_uuid()), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True) self.assertEqual(400, res.status_int) def test_invalid_attribute_field(self): data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}} res = self.api.put(_get_path('networks', id=_uuid()), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True) self.assertEqual(400, res.status_int) def test_retry_on_index(self): instance = self.plugin.return_value instance.get_networks.side_effect = [db_exc.RetryRequest(None), []] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks', fmt=self.fmt)) self.assertTrue(instance.get_networks.called) def test_retry_on_show(self): instance = self.plugin.return_value instance.get_network.side_effect = [db_exc.RetryRequest(None), {}] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks', _uuid(), fmt=self.fmt)) self.assertTrue(instance.get_network.called) class SubresourceTest(base.BaseTestCase): def setUp(self): super(SubresourceTest, self).setUp() plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin' extensions.PluginAwareExtensionManager._instance = None self.useFixture(tools.AttributeMapMemento()) self.config_parse() 
self.setup_coreplugin(plugin) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() api = router.APIRouter() SUB_RESOURCES = {} RESOURCE_ATTRIBUTE_MAP = {} SUB_RESOURCES['dummy'] = { 'collection_name': 'dummies', 'parent': {'collection_name': 'networks', 'member_name': 'network'} } RESOURCE_ATTRIBUTE_MAP['dummies'] = { 'foo': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True} } collection_name = SUB_RESOURCES['dummy'].get('collection_name') resource_name = 'dummy' parent = SUB_RESOURCES['dummy'].get('parent') params = RESOURCE_ATTRIBUTE_MAP['dummies'] member_actions = {'mactions': 'GET'} _plugin = manager.NeutronManager.get_plugin() controller = v2_base.create_resource(collection_name, resource_name, _plugin, params, member_actions=member_actions, parent=parent, allow_bulk=True, allow_pagination=True, allow_sorting=True) path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'], parent['member_name'], collection_name) mapper_kwargs = dict(controller=controller, path_prefix=path_prefix) api.map.collection(collection_name, resource_name, **mapper_kwargs) api.map.resource(collection_name, collection_name, controller=controller, parent_resource=parent, member=member_actions) self.api = webtest.TestApp(api) def tearDown(self): super(SubresourceTest, self).tearDown() def test_index_sub_resource(self): instance = self.plugin.return_value self.api.get('/networks/id1/dummies') instance.get_network_dummies.assert_called_once_with(mock.ANY, filters=mock.ANY, fields=mock.ANY, network_id='id1') def test_show_sub_resource(self): instance = self.plugin.return_value dummy_id = _uuid() self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id)) instance.get_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1', 
fields=mock.ANY) def test_create_sub_resource(self): instance = self.plugin.return_value body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}} self.api.post_json('/networks/id1/dummies', body) instance.create_network_dummy.assert_called_once_with(mock.ANY, network_id='id1', dummy=body) def test_update_sub_resource(self): instance = self.plugin.return_value dummy_id = _uuid() body = {'dummy': {'foo': 'bar'}} self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id), body) instance.update_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1', dummy=body) def test_update_subresource_to_none(self): instance = self.plugin.return_value dummy_id = _uuid() body = {'dummy': {}} self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id), body) instance.update_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1', dummy=body) def test_delete_sub_resource(self): instance = self.plugin.return_value dummy_id = _uuid() self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id)) instance.delete_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1') def test_sub_resource_member_actions(self): instance = self.plugin.return_value dummy_id = _uuid() self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id, action='mactions')) instance.mactions.assert_called_once_with(mock.ANY, dummy_id, network_id='id1') # Note: since all resources use the same controller and validation # logic, we actually get really good coverage from testing just networks. 
class V2Views(base.BaseTestCase): def _view(self, keys, collection, resource): data = dict((key, 'value') for key in keys) data['fake'] = 'value' attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection] controller = v2_base.Controller(None, collection, resource, attr_info) res = controller._view(context.get_admin_context(), data) self.assertNotIn('fake', res) for key in keys: self.assertIn(key, res) def test_network(self): keys = ('id', 'name', 'subnets', 'admin_state_up', 'status', 'tenant_id') self._view(keys, 'networks', 'network') def test_port(self): keys = ('id', 'network_id', 'mac_address', 'fixed_ips', 'device_id', 'admin_state_up', 'tenant_id', 'status') self._view(keys, 'ports', 'port') def test_subnet(self): keys = ('id', 'network_id', 'tenant_id', 'gateway_ip', 'ip_version', 'cidr', 'enable_dhcp') self._view(keys, 'subnets', 'subnet') class NotificationTest(APIv2TestBase): def setUp(self): super(NotificationTest, self).setUp() fake_notifier.reset() def _resource_op_notifier(self, opname, resource, expected_errors=False): initial_input = {resource: {'name': 'myname'}} instance = self.plugin.return_value instance.get_networks.return_value = initial_input instance.get_networks_count.return_value = 0 expected_code = exc.HTTPCreated.code if opname == 'create': initial_input[resource]['tenant_id'] = _uuid() res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=expected_errors) if opname == 'update': res = self.api.put_json( _get_path('networks', id=_uuid()), initial_input, expect_errors=expected_errors) expected_code = exc.HTTPOk.code if opname == 'delete': initial_input[resource]['tenant_id'] = _uuid() res = self.api.delete( _get_path('networks', id=_uuid()), expect_errors=expected_errors) expected_code = exc.HTTPNoContent.code expected_events = ('.'.join([resource, opname, "start"]), '.'.join([resource, opname, "end"])) self.assertEqual(len(expected_events), len(fake_notifier.NOTIFICATIONS)) for msg, event in 
zip(fake_notifier.NOTIFICATIONS, expected_events): self.assertEqual('INFO', msg['priority']) self.assertEqual(event, msg['event_type']) if opname == 'delete' and event == 'network.delete.end': self.assertIn('payload', msg) resource = msg['payload'] self.assertIn('network_id', resource) self.assertIn('network', resource) self.assertEqual(expected_code, res.status_int) def test_network_create_notifer(self): self._resource_op_notifier('create', 'network') def test_network_delete_notifer(self): self._resource_op_notifier('delete', 'network') def test_network_update_notifer(self): self._resource_op_notifier('update', 'network') class RegistryNotificationTest(APIv2TestBase): def setUp(self): # This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(RegistryNotificationTest, self).setUp() def _test_registry_notify(self, opname, resource, initial_input=None): instance = self.plugin.return_value instance.get_networks.return_value = initial_input instance.get_networks_count.return_value = 0 expected_code = exc.HTTPCreated.code with mock.patch.object(registry, 'notify') as notify: if opname == 'create': res = self.api.post_json( _get_path('networks'), initial_input) if opname == 'update': res = self.api.put_json( _get_path('networks', id=_uuid()), initial_input) expected_code = exc.HTTPOk.code if opname == 'delete': res = self.api.delete(_get_path('networks', id=_uuid())) expected_code = exc.HTTPNoContent.code self.assertTrue(notify.called) self.assertEqual(expected_code, res.status_int) def test_network_create_registry_notify(self): input = {'network': {'name': 'net', 'tenant_id': _uuid()}} self._test_registry_notify('create', 'network', input) def test_network_delete_registry_notify(self): self._test_registry_notify('delete', 'network') def test_network_update_registry_notify(self): input = {'network': {'name': 'net'}} self._test_registry_notify('update', 'network', input) def 
test_networks_create_bulk_registry_notify(self): input = {'networks': [{'name': 'net1', 'tenant_id': _uuid()}, {'name': 'net2', 'tenant_id': _uuid()}]} self._test_registry_notify('create', 'network', input) class QuotaTest(APIv2TestBase): def setUp(self): # This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(QuotaTest, self).setUp() # Use mock to let the API use a different QuotaEngine instance for # unit test in this class. This will ensure resource are registered # again and instantiated with neutron.quota.resource.CountableResource replacement_registry = resource_registry.ResourceRegistry() registry_patcher = mock.patch('neutron.quota.resource_registry.' 'ResourceRegistry.get_instance') mock_registry = registry_patcher.start().return_value mock_registry.get_resource = replacement_registry.get_resource mock_registry.resources = replacement_registry.resources # Register a resource replacement_registry.register_resource_by_name('network') def test_create_network_quota(self): cfg.CONF.set_override('quota_network', 1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = {'network': {'admin_state_up': True, 'subnets': []}} full_input['network'].update(initial_input['network']) instance = self.plugin.return_value instance.get_networks_count.return_value = 1 res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=True) instance.get_networks_count.assert_called_with(mock.ANY, filters=mock.ANY) self.assertIn("Quota exceeded for resources", res.json['NeutronError']['message']) def test_create_network_quota_no_counts(self): cfg.CONF.set_override('quota_network', 1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = {'network': {'admin_state_up': True, 'subnets': []}} full_input['network'].update(initial_input['network']) instance = self.plugin.return_value 
instance.get_networks_count.side_effect = ( NotImplementedError()) instance.get_networks.return_value = ["foo"] res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=True) instance.get_networks_count.assert_called_with(mock.ANY, filters=mock.ANY) self.assertIn("Quota exceeded for resources", res.json['NeutronError']['message']) def test_create_network_quota_without_limit(self): cfg.CONF.set_override('quota_network', -1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} instance = self.plugin.return_value instance.get_networks_count.return_value = 3 res = self.api.post_json( _get_path('networks'), initial_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) class ExtensionTestCase(base.BaseTestCase): def setUp(self): # This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(ExtensionTestCase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(tools.AttributeMapMemento()) # Create the default configurations self.config_parse() # Update the plugin and extensions path self.setup_coreplugin(plugin) cfg.CONF.set_override('api_extensions_path', EXTDIR) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() # Instantiate mock plugin and enable the V2attributes extension manager.NeutronManager.get_plugin().supported_extension_aliases = ( ["v2attrs"]) api = router.APIRouter() self.api = webtest.TestApp(api) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def tearDown(self): super(ExtensionTestCase, self).tearDown() self.api = None self.plugin = None def test_extended_create(self): net_id = _uuid() initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(), 
'v2attrs:something_else': "abc"}} data = {'network': {'admin_state_up': True, 'shared': False}} data['network'].update(initial_input['network']) return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id, 'v2attrs:something': "123"} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post_json(_get_path('networks'), initial_input) instance.create_network.assert_called_with(mock.ANY, network=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) self.assertIn('network', res.json) net = res.json['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) self.assertEqual("123", net['v2attrs:something']) self.assertNotIn('v2attrs:something_else', net) class TestSubresourcePlugin(object): def get_network_dummies(self, context, network_id, filters=None, fields=None): return [] def get_network_dummy(self, context, id, network_id, fields=None): return {} def create_network_dummy(self, context, network_id, dummy): return {} def update_network_dummy(self, context, id, network_id, dummy): return {} def delete_network_dummy(self, context, id, network_id): return def mactions(self, context, id, network_id): return class ListArgsTestCase(base.BaseTestCase): def test_list_args(self): path = '/?fields=4&foo=3&fields=2&bar=1' request = webob.Request.blank(path) expect_val = ['2', '4'] actual_val = api_common.list_args(request, 'fields') self.assertEqual(expect_val, sorted(actual_val)) def test_list_args_with_empty(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) self.assertEqual([], api_common.list_args(request, 'fields')) class FiltersTestCase(base.BaseTestCase): def test_all_skip_args(self): path = '/?fields=4&fields=3&fields=2&fields=1' request = webob.Request.blank(path) self.assertEqual({}, api_common.get_filters(request, None, ["fields"])) def test_blank_values(self): 
path = '/?foo=&bar=&baz=&qux=' request = webob.Request.blank(path) self.assertEqual({}, api_common.get_filters(request, {})) def test_no_attr_info(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, {}) self.assertEqual(expect_val, actual_val) def test_attr_info_without_conversion(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) attr_info = {'foo': {'key': 'val'}} expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, attr_info) self.assertEqual(expect_val, actual_val) def test_attr_info_with_convert_list_to(self): path = '/?foo=key=4&bar=3&foo=key=2&qux=1' request = webob.Request.blank(path) attr_info = { 'foo': { 'convert_list_to': converters.convert_kvp_list_to_dict, } } expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']} actual_val = api_common.get_filters(request, attr_info) self.assertOrderedEqual(expect_val, actual_val) def test_attr_info_with_convert_to(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) attr_info = {'foo': {'convert_to': converters.convert_to_int}} expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, attr_info) self.assertEqual(expect_val, actual_val) class CreateResourceTestCase(base.BaseTestCase): def test_resource_creation(self): resource = v2_base.create_resource('fakes', 'fake', None, {}) self.assertIsInstance(resource, webob.dec.wsgify) ```
[ { "content": "```python\n# Copyright 2019, The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses...
[ { "content": "<|memory_start|>```python\n# Copyright 2019, The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apa...
```python # Copyright 2019, The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for the `tensor_encoding` package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import assert_compatible from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import merge_dicts from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import OrderedEnum from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import split_dict_py_tf from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils import static_or_dynamic_shape from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import fast_walsh_hadamard_transform from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import pack_into_int from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_floats from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_floats_cmwc from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_signs from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import random_signs_cmwc 
from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.tf_utils import unpack_from_int ```
[ { "content": "Here is the source code:\n```python\nimport requests # Used to make HTTP requests\nimport json # Used to parse JSON\nimport os # Used to infer environment variables\n\nAPI_TAGO = os.environ.get('TAGO_API') or 'https://api.tago.io'\nREALTIME = os.environ.get('TAGO_REALTIME') or 'https://realtime...
[ { "content": "Here is the source code:\n<|memory_start|>```python\nimport requests # Used to make HTTP requests\nimport json # Used to parse JSON\nimport os # Used to infer environment variables\n\nAPI_TAGO = os.environ.get('TAGO_API') or 'https://api.tago.io'\nREALTIME = os.environ.get('TAGO_REALTIME') or '...
```python import requests # Used to make HTTP requests import json # Used to parse JSON import os # Used to infer environment variables API_TAGO = os.environ.get('TAGO_API') or 'https://api.tago.io' REALTIME = os.environ.get('TAGO_REALTIME') or 'https://realtime.tago.io' class Plan: def __init__(self, acc_token): self.token = acc_token self.default_headers = { 'content-type': 'application/json', 'Account-Token': acc_token} return def setPlanParameters(self, data): data = data if data else {} return requests.post('{api_endpoint}/account/plan'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json() def getPriceToUpdate(self, data): return requests.get('{api_endpoint}/account/plan_value'.format(api_endpoint=API_TAGO), headers=self.default_headers, params=json.dumps(data)).json() def getActivePlan(self): return requests.get('{api_endpoint}/account/plan'.format(api_endpoint=API_TAGO), headers=self.default_headers).json() def getCurrentPrices(self): return requests.get('{api_endpoint}/pricing'.format(api_endpoint=API_TAGO), headers=self.default_headers).json() def summary(self): return requests.get('{api_endpoint}/billing'.format(api_endpoint=API_TAGO), headers=self.default_headers).json() ```
[ { "content": "Provide an exact copy of the source code:\n```python\nimport _plotly_utils.basevalidators\n\n\nclass CaxisValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"caxis\", parent_name=\"layout.ternary\", **kwargs):\n super(CaxisValidator, self).__init...
[ { "content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport _plotly_utils.basevalidators\n\n\nclass CaxisValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"caxis\", parent_name=\"layout.ternary\", **kwargs):\n super(CaxisValidat...
```python import _plotly_utils.basevalidators class CaxisValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__(self, plotly_name="caxis", parent_name="layout.ternary", **kwargs): super(CaxisValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "Caxis"), data_docs=kwargs.pop( "data_docs", """ color Sets default for all colors associated with this axis all at once: line, font, tick, and grid colors. Grid color is lightened by blending this with the plot background Individual pieces can override this. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. 
If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. gridcolor Sets the color of the grid lines. gridwidth Sets the width (in px) of the grid lines. hoverformat Sets the hover text formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-3.x-api- reference/blob/master/Formatting.md#d3_format. And for dates see: https://github.com/d3/d3-time- format#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" layer Sets the layer on which this axis is displayed. If *above traces*, this axis is displayed above all the subplot's traces If *below traces*, this axis is displayed below all the subplot's traces, but above the grid lines. Useful when used together with scatter-like traces with `cliponaxis` set to False to show markers and/or text nodes above this axis. linecolor Sets the axis line color. linewidth Sets the width (in px) of the axis line. min The minimum value visible on this axis. The maximum is determined by the sum minus the minimum values of the other two axes. The full view corresponds to all the minima set to zero. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. 
showgrid Determines whether or not grid lines are drawn. If True, the grid lines are drawn at every tick mark. showline Determines whether or not a line bounding this axis is drawn. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the tick font. tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-3.x-api- reference/blob/master/Formatting.md#d3_format. And for dates see: https://github.com/d3/d3-time- format#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.layout. 
ternary.caxis.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.lay out.ternary.caxis.tickformatstopdefaults), sets the default property values to use for elements of layout.ternary.caxis.tickformatstops ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for ticktext . tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for tickvals . tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.layout.ternary.cax is.Title` instance or dict with compatible properties titlefont Deprecated: Please use layout.ternary.caxis.title.font instead. Sets this axis' title font. Note that the title's font used to be customized by the now deprecated `titlefont` attribute. uirevision Controls persistence of user-driven changes in axis `min`, and `title` if in `editable: true` configuration. Defaults to `ternary<N>.uirevision`. """, ), **kwargs ) ```
[ { "content": "Return the code unaltered:\n```python\n# -*- coding: utf-8 -*-\n'''\nQueries for experiments with ENDSEQ operator\n'''\n\nimport os\n\nfrom tool.attributes import get_move_attribute_list, get_place_attribute_list\nfrom tool.experiment import SLI, RAN, ALGORITHM, \\\n CQL_ALG, QUERY, Q_MOVE, Q_P...
[ { "content": "Return the code unaltered:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n'''\nQueries for experiments with ENDSEQ operator\n'''\n\nimport os\n\nfrom tool.attributes import get_move_attribute_list, get_place_attribute_list\nfrom tool.experiment import SLI, RAN, ALGORITHM, \\\n CQL_ALG, QU...
```python # -*- coding: utf-8 -*- ''' Queries for experiments with ENDSEQ operator ''' import os from tool.attributes import get_move_attribute_list, get_place_attribute_list from tool.experiment import SLI, RAN, ALGORITHM, \ CQL_ALG, QUERY, Q_MOVE, Q_PLACE from tool.io import get_query_dir, write_to_txt, get_out_file, get_env_file from tool.query.stream import get_register_stream, REG_Q_OUTPUT_STR, REG_Q_STR # ============================================================================= # Query using ENDSEQ operator # ============================================================================= ENDSEQ_QUERY = ''' SELECT SUBSEQUENCE END POSITION FROM SEQUENCE IDENTIFIED BY player_id [RANGE {ran} SECOND, SLIDE {sli} SECOND] FROM s; ''' # ============================================================================= # CQL Queries # ============================================================================= # Query to get sequence from stream CQL_Z = ''' SELECT SEQUENCE IDENTIFIED BY player_id [RANGE {ran} SECOND, SLIDE {sli} SECOND] FROM s; ''' # Query equivalent to ENDSEQ operator CQL_EQUIV = ''' SELECT _pos - {ran} + 1 AS _pos, {att} FROM z WHERE _pos >= {ran} ''' def gen_endseq_query(configuration, experiment_conf): ''' Generate ENDSEQ query ''' query_dir = get_query_dir(configuration, experiment_conf) filename = query_dir + os.sep + 'endseq.cql' query = ENDSEQ_QUERY.format(ran=experiment_conf[RAN], sli=experiment_conf[SLI]) write_to_txt(filename, query) def gen_cql_z_query(query_dir, experiment_conf): ''' Consider RANGE and SLIDE and generate Z relation ''' query = CQL_Z.format(ran=experiment_conf[RAN], sli=experiment_conf[SLI]) filename = query_dir + os.sep + 'z.cql' write_to_txt(filename, query) def gen_cql_final_query(query_dir, experiment_conf): ''' Generate final query CQL query ''' filename = query_dir + os.sep + 'equiv.cql' if os.path.isfile(filename): return range_value = experiment_conf[RAN] if experiment_conf[QUERY] == Q_MOVE: att_list = 
get_move_attribute_list('z.') elif experiment_conf[QUERY] == Q_PLACE: att_list = get_place_attribute_list('z.') att_str = ', '.join(att_list) pos_query_list = [] for position in range(1, range_value + 1): pos_query = CQL_EQUIV.format(att=att_str, ran=position) pos_query_list.append(pos_query) query = '\nUNION\n'.join(pos_query_list) + ';' out_file = open(filename, 'w') out_file.write(query) out_file.close() def gen_cql_queries(configuration, experiment_conf): ''' Generate CQL queries ''' query_dir = get_query_dir(configuration, experiment_conf) gen_cql_z_query(query_dir, experiment_conf) gen_cql_final_query(query_dir, experiment_conf) def gen_all_queries(configuration, experiment_list): ''' Generate all queries ''' for exp_conf in experiment_list: if exp_conf[ALGORITHM] == CQL_ALG: gen_cql_queries(configuration, exp_conf) else: gen_endseq_query(configuration, exp_conf) def gen_endseq_env(configuration, experiment_conf, output): ''' Generate environment for ENDSEQ ''' text = get_register_stream(experiment_conf) # Get query filename query_dir = get_query_dir(configuration, experiment_conf) filename = query_dir + os.sep + 'endseq.cql' # Register query if output: # Get output filename out_file = get_out_file(configuration, experiment_conf) text += REG_Q_OUTPUT_STR.format(qname='endseq', qfile=filename, ofile=out_file) else: text += REG_Q_STR.format(qname='endseq', qfile=filename) # Get environment filename filename = get_env_file(configuration, experiment_conf) write_to_txt(filename, text) def gen_cql_env(configuration, experiment_conf, output): ''' Generate environment for CQL ''' text = get_register_stream(experiment_conf) query_dir = get_query_dir(configuration, experiment_conf) # Environment files for equivalent CQL queries filename = query_dir + os.sep + 'z.cql' text += REG_Q_STR.format(qname='z', qfile=filename) # Final equivalent query filename = query_dir + os.sep + 'equiv.cql' if output: # Get output filename out_file = get_out_file(configuration, 
experiment_conf) text += REG_Q_OUTPUT_STR.format(qname='equiv', qfile=filename, ofile=out_file) else: text += REG_Q_STR.format(qname='equiv', qfile=filename) filename = get_env_file(configuration, experiment_conf) write_to_txt(filename, text) def gen_all_env(configuration, experiment_list, output=False): ''' Generate all environments ''' for exp_conf in experiment_list: if exp_conf[ALGORITHM] == CQL_ALG: gen_cql_env(configuration, exp_conf, output) else: gen_endseq_env(configuration, exp_conf, output) ```
[ { "content": "Produce an exact reconstruction of the code:\n```python\nfrom __future__ import division, print_function\n\nimport operator\nfrom collections import OrderedDict\nimport numpy as np\nimport pandas as pd\nimport toolz\nfrom tornado.ioloop import IOLoop\nfrom tornado import gen\n\nfrom ..collection i...
[ { "content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nfrom __future__ import division, print_function\n\nimport operator\nfrom collections import OrderedDict\nimport numpy as np\nimport pandas as pd\nimport toolz\nfrom tornado.ioloop import IOLoop\nfrom tornado import gen\n\nfro...
```python from __future__ import division, print_function import operator from collections import OrderedDict import numpy as np import pandas as pd import toolz from tornado.ioloop import IOLoop from tornado import gen from ..collection import Streaming, _stream_types, OperatorMixin from ..sources import Source from ..utils import M from . import aggregations from .utils import is_dataframe_like, is_series_like, is_index_like, \ get_base_frame_type, get_dataframe_package class BaseFrame(Streaming): def round(self, decimals=0): """ Round elements in frame """ return self.map_partitions(M.round, self, decimals=decimals) def reset_index(self): """ Reset Index """ return self.map_partitions(M.reset_index, self) def set_index(self, index, **kwargs): """ Set Index """ return self.map_partitions(M.set_index, self, index, **kwargs) def tail(self, n=5): """ Round elements in frame """ return self.map_partitions(M.tail, self, n=n) def astype(self, dt): return self.map_partitions(M.astype, self, dt) @property def index(self): return self.map_partitions(lambda x: x.index, self) def map(self, func, na_action=None): return self.map_partitions(self._subtype.map, self, func, na_action=na_action) class Frame(BaseFrame): _stream_type = 'streaming' def groupby(self, other): """ Groupby aggregations """ return GroupBy(self, other) def aggregate(self, aggregation, start=None): return self.accumulate_partitions(aggregations.accumulator, agg=aggregation, start=start, stream_type='updating', returns_state=True) def sum(self, start=None): """ Sum frame. Parameters ---------- start: None or resulting Python object type from the operation Accepts a valid start state. """ return self.aggregate(aggregations.Sum(), start) def count(self, start=None): """ Count of frame Parameters ---------- start: None or resulting Python object type from the operation Accepts a valid start state. 
""" return self.aggregate(aggregations.Count(), start) @property def size(self): """ size of frame """ return self.aggregate(aggregations.Size()) def mean(self, start=None): """ Average frame Parameters ---------- start: None or resulting Python object type from the operation Accepts a valid start state. """ return self.aggregate(aggregations.Mean(), start) def rolling(self, window, min_periods=1, with_state=False, start=()): """ Compute rolling aggregations When followed by an aggregation method like ``sum``, ``mean``, or ``std`` this produces a new Streaming dataframe whose values are aggregated over that window. The window parameter can be either a number of rows or a timedelta like ``"2 minutes"` in which case the index should be a datetime index. This operates by keeping enough of a backlog of records to maintain an accurate stream. It performs a copy at every added dataframe. Because of this it may be slow if the rolling window is much larger than the average stream element. Parameters ---------- window: int or timedelta Window over which to roll with_state: bool (False) Whether to return the state along with the result as a tuple (state, result). State may be needed downstream for a number of reasons like checkpointing. start: () or resulting Python object type from the operation Accepts a valid start state. Returns ------- Rolling object See Also -------- DataFrame.window: more generic window operations """ return Rolling(self, window, min_periods, with_state, start) def window(self, n=None, value=None, with_state=False, start=None): """ Sliding window operations Windowed operations are defined over a sliding window of data, either with a fixed number of elements:: >>> df.window(n=10).sum() # sum of the last ten elements or over an index value range (index must be monotonic):: >>> df.window(value='2h').mean() # average over the last two hours Windowed dataframes support all normal arithmetic, aggregations, and groupby-aggregations. 
Parameters ---------- n: int Window of number of elements over which to roll value: str Window of time over which to roll with_state: bool (False) Whether to return the state along with the result as a tuple (state, result). State may be needed downstream for a number of reasons like checkpointing. start: None or resulting Python object type from the operation Accepts a valid start state. Examples -------- >>> df.window(n=10).std() >>> df.window(value='2h').count() >>> w = df.window(n=100) >>> w.groupby(w.name).amount.sum() >>> w.groupby(w.x % 10).y.var() See Also -------- DataFrame.rolling: mimic's Pandas rolling aggregations """ return Window(self, n=n, value=value, with_state=with_state, start=start) def _cumulative_aggregation(self, op): return self.accumulate_partitions(_cumulative_accumulator, returns_state=True, start=(), op=op) def cumsum(self): """ Cumulative sum """ return self._cumulative_aggregation(op='cumsum') def cumprod(self): """ Cumulative product """ return self._cumulative_aggregation(op='cumprod') def cummin(self): """ Cumulative minimum """ return self._cumulative_aggregation(op='cummin') def cummax(self): """ Cumulative maximum """ return self._cumulative_aggregation(op='cummax') class Frames(BaseFrame): _stream_type = 'updating' def sum(self, **kwargs): return self.map_partitions(M.sum, self, **kwargs) def mean(self, **kwargs): return self.map_partitions(M.mean, self, **kwargs) def std(self, **kwargs): return self.map_partitions(M.std, self, **kwargs) def var(self, **kwargs): return self.map_partitions(M.var, self, **kwargs) @property def size(self, **kwargs): return self.map_partitions(M.size, self, **kwargs) def count(self, **kwargs): return self.map_partitions(M.count, self, **kwargs) def nlargest(self, n, *args, **kwargs): return self.map_partitions(M.nlargest, self, n, *args, **kwargs) def tail(self, n=5): """ Round elements in frame """ return self.map_partitions(M.tail, self, n=n) class _DataFrameMixin(object): @property def 
columns(self): return self.example.columns @property def dtypes(self): return self.example.dtypes def __getitem__(self, index): return self.map_partitions(operator.getitem, self, index) def __getattr__(self, key): if key in self.columns or not len(self.columns): return self.map_partitions(getattr, self, key) else: raise AttributeError("DataFrame has no attribute %r" % key) def __dir__(self): o = set(dir(type(self))) o.update(self.__dict__) o.update(c for c in self.columns if (isinstance(c, str) and c.isidentifier())) return list(o) def assign(self, **kwargs): """ Assign new columns to this dataframe Alternatively use setitem syntax Examples -------- >>> sdf = sdf.assign(z=sdf.x + sdf.y) # doctest: +SKIP >>> sdf['z'] = sdf.x + sdf.y # doctest: +SKIP """ kvs = list(toolz.concat(kwargs.items())) def _assign(df, *kvs): keys = kvs[::2] values = kvs[1::2] kwargs = OrderedDict(zip(keys, values)) return df.assign(**kwargs) return self.map_partitions(_assign, self, *kvs) def to_frame(self): """ Convert to a streaming dataframe """ return self def __setitem__(self, key, value): if isinstance(value, Series): result = self.assign(**{key: value}) elif isinstance(value, DataFrame): result = self.assign(**{k: value[c] for k, c in zip(key, value.columns)}) else: example = self.example.copy() example[key] = value df_type = type(self.example) result = self.map_partitions(df_type.assign, self, **{key: value}) self.stream = result.stream self.example = result.example return self def query(self, expr, **kwargs): df_type = type(self.example) return self.map_partitions(df_type.query, self, expr, **kwargs) class DataFrame(Frame, _DataFrameMixin): """ A Streaming Dataframe This is a logical collection over a stream of Pandas dataframes. Operations on this object will translate to the appropriate operations on the underlying Pandas dataframes. 
See Also -------- Series """ def __init__(self, *args, **kwargs): # {'x': sdf.x + 1, 'y': sdf.y - 1} if len(args) == 1 and not kwargs and isinstance(args[0], dict): def concat(tup, module=None, columns=None): result = module.concat(tup, axis=1) result.columns = columns return result columns, values = zip(*args[0].items()) base_frame_type = values[0]._subtype df_package = get_dataframe_package(base_frame_type) stream = type(values[0].stream).zip(*[v.stream for v in values]) stream = stream.map(concat, module=df_package, columns=list(columns)) example = df_package.DataFrame({k: getattr(v, 'example', v) for k, v in args[0].items()}) DataFrame.__init__(self, stream, example) else: example = None if "example" in kwargs: example = kwargs.get('example') elif len(args) > 1: example = args[1] self._subtype = get_base_frame_type(self.__class__.__name__, is_dataframe_like, example) super(DataFrame, self).__init__(*args, **kwargs) def verify(self, x): """ Verify consistency of elements that pass through this stream """ super(DataFrame, self).verify(x) if list(x.columns) != list(self.example.columns): raise IndexError("Input expected to have columns %s, got %s" % (self.example.columns, x.columns)) class _SeriesMixin(object): @property def dtype(self): return self.example.dtype def to_frame(self): """ Convert to a streaming dataframe """ return self.map_partitions(M.to_frame, self) class Series(Frame, _SeriesMixin): """ A Streaming Series This is a logical collection over a stream of Pandas series objects. Operations on this object will translate to the appropriate operations on the underlying Pandas series. 
See Also -------- DataFrame """ def __init__(self, *args, **kwargs): example = None if "example" in kwargs: example = kwargs.get('example') elif len(args) > 1: example = args[1] if isinstance(self, Index): self._subtype = get_base_frame_type(self.__class__.__name__, is_index_like, example) else: self._subtype = get_base_frame_type(self.__class__.__name__, is_series_like, example) super(Series, self).__init__(*args, **kwargs) def value_counts(self): return self.accumulate_partitions(aggregations.accumulator, agg=aggregations.ValueCounts(), start=None, stream_type='updating', returns_state=True) class Index(Series): pass class DataFrames(Frames, _DataFrameMixin): pass class Seriess(Frames, _SeriesMixin): pass def _cumulative_accumulator(state, new, op=None): if not len(new): return state, new if not len(state): df = new else: df_package = get_dataframe_package(new) df = df_package.concat([state, new]) # ouch, full copy result = getattr(df, op)() new_state = result.iloc[-1:] if len(state): result = result[1:] return new_state, result class Rolling(object): """ Rolling aggregations This intermediate class enables rolling aggregations across either a fixed number of rows or a time window. 
Examples -------- >>> sdf.rolling(10).x.mean() # doctest: +SKIP >>> sdf.rolling('100ms').x.mean() # doctest: +SKIP """ def __init__(self, sdf, window, min_periods, with_state, start): self.root = sdf if not isinstance(window, int): window = pd.Timedelta(window) min_periods = 1 self.window = window self.min_periods = min_periods self.with_state = with_state self.start = start def __getitem__(self, key): sdf = self.root[key] return Rolling(sdf, self.window, self.min_periods, self.with_state, self.start) def __getattr__(self, key): if key in self.root.columns or not len(self.root.columns): return self[key] else: raise AttributeError("Rolling has no attribute %r" % key) def _known_aggregation(self, op, *args, **kwargs): return self.root.accumulate_partitions(rolling_accumulator, window=self.window, op=op, args=args, kwargs=kwargs, start=self.start, returns_state=True, with_state=self.with_state) def sum(self): """ Rolling sum """ return self._known_aggregation('sum') def mean(self): """ Rolling mean """ return self._known_aggregation('mean') def min(self): """ Rolling minimum """ return self._known_aggregation('min') def max(self): """ Rolling maximum """ return self._known_aggregation('max') def median(self): """ Rolling median """ return self._known_aggregation('median') def std(self, *args, **kwargs): """ Rolling standard deviation """ return self._known_aggregation('std', *args, **kwargs) def var(self, *args, **kwargs): """ Rolling variance """ return self._known_aggregation('var', *args, **kwargs) def count(self, *args, **kwargs): """ Rolling count """ return self._known_aggregation('count', *args, **kwargs) def aggregate(self, *args, **kwargs): """ Rolling aggregation """ return self._known_aggregation('aggregate', *args, **kwargs) def quantile(self, *args, **kwargs): """ Rolling quantile """ return self._known_aggregation('quantile', *args, **kwargs) class Window(OperatorMixin): """ Windowed aggregations This provides a set of aggregations that can be applied 
over a sliding window of data. See Also -------- DataFrame.window: contains full docstring """ def __init__(self, sdf, n=None, value=None, with_state=False, start=None): if value is None and isinstance(n, (str, pd.Timedelta)): value = n n = None self.n = n self.root = sdf if isinstance(value, str) and isinstance(self.root.example.index, pd.DatetimeIndex): value = pd.Timedelta(value) self.value = value self.with_state = with_state self.start = start def __getitem__(self, key): sdf = self.root[key] return Window(sdf, n=self.n, value=self.value, with_state=self.with_state, start=self.start) def __getattr__(self, key): if key in self.root.columns or not len(self.root.columns): return self[key] else: raise AttributeError("Window has no attribute %r" % key) def map_partitions(self, func, *args, **kwargs): args2 = [a.root if isinstance(a, Window) else a for a in args] root = self.root.map_partitions(func, *args2, **kwargs) return Window(root, n=self.n, value=self.value, with_state=self.with_state, start=self.start) @property def index(self): return self.map_partitions(lambda x: x.index, self) @property def columns(self): return self.root.columns @property def dtypes(self): return self.root.dtypes @property def example(self): return self.root.example def reset_index(self): return Window(self.root.reset_index(), n=self.n, value=self.value) def aggregate(self, agg): if self.n is not None: diff = aggregations.diff_iloc window = self.n elif self.value is not None: diff = aggregations.diff_loc window = self.value return self.root.accumulate_partitions(aggregations.window_accumulator, diff=diff, window=window, agg=agg, start=self.start, returns_state=True, stream_type='updating', with_state=self.with_state) def full(self): return self.aggregate(aggregations.Full()) def apply(self, func): """ Apply an arbitrary function over each window of data """ result = self.aggregate(aggregations.Full()) return result.map_partitions(func, result) def sum(self): """ Sum elements within window 
""" return self.aggregate(aggregations.Sum()) def count(self): """ Count elements within window """ return self.aggregate(aggregations.Count()) def mean(self): """ Average elements within window """ return self.aggregate(aggregations.Mean()) def var(self, ddof=1): """ Compute variance of elements within window """ return self.aggregate(aggregations.Var(ddof=ddof)) def std(self, ddof=1): """ Compute standard deviation of elements within window """ return self.var(ddof=ddof) ** 0.5 @property def size(self): """ Number of elements within window """ return self.aggregate(aggregations.Size()) def value_counts(self): """ Count groups of elements within window """ return self.aggregate(aggregations.ValueCounts()) def groupby(self, other): """ Groupby-aggregations within window """ return WindowedGroupBy(self.root, other, None, self.n, self.value, self.with_state, self.start) def rolling_accumulator(acc, new, window=None, op=None, with_state=False, args=(), kwargs={}): if len(acc): df_package = get_dataframe_package(new) df = df_package.concat([acc, new]) else: df = new result = getattr(df.rolling(window), op)(*args, **kwargs) if isinstance(window, int): new_acc = df.iloc[-window:] else: new_acc = df.loc[result.index.max() - window:] result = result.iloc[len(acc):] return new_acc, result def _accumulate_mean(accumulator, new): accumulator = accumulator.copy() accumulator['sums'] += new.sum() accumulator['counts'] += new.count() result = accumulator['sums'] / accumulator['counts'] return accumulator, result def _accumulate_sum(accumulator, new): return accumulator + new.sum() def _accumulate_size(accumulator, new): return accumulator + new.size() class GroupBy(object): """ Groupby aggregations on streaming dataframes """ def __init__(self, root, grouper, index=None): self.root = root self.grouper = grouper self.index = index def __getitem__(self, index): return GroupBy(self.root, self.grouper, index) def __getattr__(self, key): if key in self.root.columns or not 
len(self.root.columns): return self[key] else: raise AttributeError("GroupBy has no attribute %r" % key) def _accumulate(self, Agg, with_state=False, start=None, **kwargs): stream_type = 'updating' if isinstance(self.grouper, Streaming): stream = self.root.stream.zip(self.grouper.stream) grouper_example = self.grouper.example agg = Agg(self.index, grouper=None, **kwargs) else: stream = self.root.stream grouper_example = self.grouper agg = Agg(self.index, grouper=self.grouper, **kwargs) # Compute example state = agg.initial(self.root.example, grouper=grouper_example) if hasattr(grouper_example, 'iloc'): grouper_example = grouper_example.iloc[:0] elif isinstance(grouper_example, np.ndarray) or is_index_like(grouper_example): grouper_example = grouper_example[:0] _, example = agg.on_new(state, self.root.example.iloc[:0], grouper=grouper_example) outstream = stream.accumulate(aggregations.groupby_accumulator, agg=agg, start=start, returns_state=True, with_state=with_state) for fn, s_type in _stream_types[stream_type]: """Function checks if example is of a specific frame type""" if fn(example): return s_type(outstream, example) return Streaming(outstream, example, stream_type=stream_type) def count(self, start=None): """ Groupby-count Parameters ---------- start: None or resulting Python object type from the operation Accepts a valid start state. """ return self._accumulate(aggregations.GroupbyCount, start=start) def mean(self, with_state=False, start=None): """ Groupby-mean Parameters ---------- start: None or resulting Python object type from the operation Accepts a valid start state. 
""" return self._accumulate(aggregations.GroupbyMean, with_state=with_state, start=start) def size(self): """ Groupby-size """ return self._accumulate(aggregations.GroupbySize) def std(self, ddof=1): """ Groupby-std """ return self.var(ddof=ddof) ** 0.5 def sum(self, start=None): """ Groupby-sum Parameters ---------- start: None or resulting Python object type from the operation Accepts a valid start state. """ return self._accumulate(aggregations.GroupbySum, start=start) def var(self, ddof=1): """ Groupby-variance """ return self._accumulate(aggregations.GroupbyVar, ddof=ddof) class WindowedGroupBy(GroupBy): """ Groupby aggregations over a window of data """ def __init__(self, root, grouper, index=None, n=None, value=None, with_state=False, start=None): self.root = root self.grouper = grouper self.index = index self.n = n if isinstance(value, str) and isinstance(self.root.example.index, pd.DatetimeIndex): value = pd.Timedelta(value) self.value = value self.with_state = with_state self.start = start def __getitem__(self, index): return WindowedGroupBy(self.root, self.grouper, index, self.n, self.value, self.with_state, self.start) def _accumulate(self, Agg, **kwargs): stream_type = 'updating' if isinstance(self.grouper, Streaming): stream = self.root.stream.zip(self.grouper.stream) grouper_example = self.grouper.example agg = Agg(self.index, grouper=None, **kwargs) elif isinstance(self.grouper, Window): stream = self.root.stream.zip(self.grouper.root.stream) grouper_example = self.grouper.root.example agg = Agg(self.index, grouper=None, **kwargs) else: stream = self.root.stream grouper_example = self.grouper agg = Agg(self.index, grouper=self.grouper, **kwargs) # Compute example state = agg.initial(self.root.example, grouper=grouper_example) if hasattr(grouper_example, 'iloc'): grouper_example = grouper_example.iloc[:0] elif isinstance(grouper_example, np.ndarray) or is_index_like(grouper_example): grouper_example = grouper_example[:0] _, example = 
agg.on_new(state, self.root.example.iloc[:0], grouper=grouper_example) if self.n is not None: diff = aggregations.diff_iloc window = self.n elif self.value is not None: diff = aggregations.diff_loc window = self.value outstream = stream.accumulate(aggregations.windowed_groupby_accumulator, agg=agg, start=self.start, returns_state=True, diff=diff, window=window, with_state=self.with_state) for fn, s_type in _stream_types[stream_type]: """Function checks if example is of a specific frame type""" if fn(example): return s_type(outstream, example) return Streaming(outstream, example, stream_type=stream_type) def random_datapoint(now, **kwargs): """Example of querying a single current value""" return pd.DataFrame( {'a': np.random.random(1)}, index=[now]) def random_datablock(last, now, **kwargs): """ Example of querying over a time range since last update Parameters ---------- last: pd.Timestamp Time of previous call to this function. now: pd.Timestamp Current time. freq: pd.Timedelta, optional The time interval between individual records to be returned. For good throughput, should be much smaller than the interval at which this function is called. Returns a pd.DataFrame with random values where: The x column is uniformly distributed. The y column is Poisson distributed. The z column is normally distributed. """ freq = kwargs.get("freq", pd.Timedelta("100ms")) index = pd.date_range(start=last + freq, end=now, freq=freq) df = pd.DataFrame({'x': np.random.random(len(index)), 'y': np.random.poisson(size=len(index)), 'z': np.random.normal(0, 1, size=len(index))}, index=index) return df class PeriodicDataFrame(DataFrame): """A streaming dataframe using the asyncio ioloop to poll a callback fn Parameters ---------- datafn: callable Callback function accepting **kwargs and returning a pd.DataFrame. kwargs will include at least 'last' (pd.Timestamp.now() when datafn was last invoked), and 'now' (current pd.Timestamp.now()). 
interval: timedelta The time interval between new dataframes. dask: boolean If true, uses a DaskStream instead of a regular Source. **kwargs: Optional keyword arguments to be passed into the callback function. By default, returns a three-column random pd.DataFrame generated by the 'random_datablock' function. Example ------- >>> df = PeriodicDataFrame(interval='1s', datafn=random_datapoint) # doctest: +SKIP """ def __init__(self, datafn=random_datablock, interval='500ms', dask=False, **kwargs): if dask: from streamz.dask import DaskStream source = DaskStream() loop = source.loop else: source = Source() loop = IOLoop.current() self.interval = pd.Timedelta(interval).total_seconds() self.source = source self.continue_ = [True] self.kwargs = kwargs stream = self.source.map(lambda x: datafn(**x, **kwargs)) example = datafn(last=pd.Timestamp.now(), now=pd.Timestamp.now(), **kwargs) super(PeriodicDataFrame, self).__init__(stream, example) loop.add_callback(self._cb, self.interval, self.source, self.continue_) def __del__(self): self.stop() def stop(self): self.continue_[0] = False @staticmethod @gen.coroutine def _cb(interval, source, continue_): last = pd.Timestamp.now() while continue_[0]: yield gen.sleep(interval) now = pd.Timestamp.now() yield source._emit(dict(last=last, now=now)) last = now class Random(PeriodicDataFrame): """PeriodicDataFrame providing random values by default Accepts same parameters as PeriodicDataFrame, plus `freq`, a string that will be converted to a pd.Timedelta and passed to the 'datafn'. Useful mainly for examples and docs. 
Example ------- >>> source = Random(freq='100ms', interval='1s') # doctest: +SKIP """ def __init__(self, freq='100ms', interval='500ms', dask=False, datafn=random_datablock): super(Random, self).__init__(datafn, interval, dask, freq=pd.Timedelta(freq)) _stream_types['streaming'].append((is_dataframe_like, DataFrame)) _stream_types['streaming'].append((is_index_like, Index)) _stream_types['streaming'].append((is_series_like, Series)) _stream_types['updating'].append((is_dataframe_like, DataFrames)) _stream_types['updating'].append((is_series_like, Seriess)) ```
[ { "content": "Here is the snippet:\n```python\n# -*- coding: utf8 -*-\n\n__author__ = 'sergey'\n\nimport hashlib\nfrom time import time\nfrom dedupsqlfs.db.mysql.table import Table\n\nclass TableSubvolume( Table ):\n\n _table_name = \"subvolume\"\n\n def create( self ):\n c = self.getCursor()\n\n ...
[ { "content": "Here is the snippet:\n<|memory_start|>```python\n# -*- coding: utf8 -*-\n\n__author__ = 'sergey'\n\nimport hashlib\nfrom time import time\nfrom dedupsqlfs.db.mysql.table import Table\n\nclass TableSubvolume( Table ):\n\n _table_name = \"subvolume\"\n\n def create( self ):\n c = self.g...
```python # -*- coding: utf8 -*- __author__ = 'sergey' import hashlib from time import time from dedupsqlfs.db.mysql.table import Table class TableSubvolume( Table ): _table_name = "subvolume" def create( self ): c = self.getCursor() # Create table c.execute( "CREATE TABLE IF NOT EXISTS `%s` (" % self.getName()+ "`id` INT UNSIGNED PRIMARY KEY AUTO_INCREMENT, "+ "`hash` BINARY(16) NOT NULL, "+ "`name` BLOB NOT NULL, "+ "`stats` TEXT, "+ "`root_diff` TEXT, "+ "`readonly` TINYINT UNSIGNED NOT NULL DEFAULT 0, "+ "`stats_at` INT UNSIGNED, "+ "`root_diff_at` INT UNSIGNED, "+ "`created_at` INT UNSIGNED NOT NULL, "+ "`mounted_at` INT UNSIGNED, "+ "`updated_at` INT UNSIGNED"+ ")"+ self._getCreationAppendString() ) self.createIndexIfNotExists('hash', ('hash',), True) return def insert( self, name, created_at, mounted_at=None, updated_at=None, stats_at=None, stats=None, root_diff_at=None, root_diff=None ): """ :param name: str - name for subvolume/snapshot :param created_at: int - creation time :param mounted_at: int|None - subvolume mounted :param updated_at: int|None - subvolume updated :return: int """ self.startTimer() cur = self.getCursor() digest = hashlib.new('md5', name).digest() cur.execute( "INSERT INTO `%s` " % self.getName()+ " (`hash`,`name`,`created_at`, `mounted_at`, `updated_at`, `stats_at`, `stats`, `root_diff_at`, `root_diff`) "+ "VALUES (X%(hash)s, X%(name)s, %(created)s, %(mounted)s, %(updated)s, %(statsed)s, %(stats)s, %(diffed)s, %(root_diff)s)", { "hash": digest.hex(), "name": name.hex(), "created": int(created_at), "mounted": mounted_at, "updated": updated_at, "statsed": stats_at, "stats": stats, "diffed": root_diff_at, "root_diff": root_diff } ) item = cur.lastrowid self.stopTimer('insert') return item def get_count(self): self.startTimer() cur = self.getCursor() cur.execute("SELECT COUNT(1) as `cnt` FROM `%s`" % self.getName()) item = cur.fetchone() if item: item = item["cnt"] else: item = 0 self.stopTimer('get_count') return item def readonly(self, 
subvol_id, flag=True): self.startTimer() if flag: flag = 1 else: flag = 0 cur = self.getCursor() cur.execute( "UPDATE `%s` " % self.getName()+ " SET `readonly`=%(readonly)s WHERE `id`=%(id)s", { "readonly": flag, "id": subvol_id } ) self.stopTimer('readonly') return cur.rowcount def mount_time(self, subvol_id, mtime=None): self.startTimer() if mtime is None: mtime = time() cur = self.getCursor() cur.execute( "UPDATE `%s` " % self.getName()+ " SET `mounted_at`=%(mounted)s WHERE `id`=%(id)s", { "mounted": int(mtime), "id": subvol_id } ) self.stopTimer('mount_time') return cur.rowcount def update_time(self, subvol_id, utime=None): self.startTimer() if utime is None: utime = time() cur = self.getCursor() cur.execute( "UPDATE `%s` " % self.getName()+ " SET `updated_at`=%(updated)s WHERE `id`=%(id)s", { "updated": int(utime), "id": subvol_id } ) self.stopTimer('update_time') return cur.rowcount def stats_time(self, subvol_id, stime=None): self.startTimer() if stime is None: stime = time() cur = self.getCursor() cur.execute( "UPDATE `%s` " % self.getName()+ " SET `stats_at`=%(stime)s WHERE `id`=%(id)s", { "stime": int(stime), "id": subvol_id } ) self.stopTimer('stats_time') return cur.rowcount def set_stats(self, subvol_id, stats): self.startTimer() cur = self.getCursor() cur.execute( "UPDATE `%s` " % self.getName()+ " SET `stats`=%(stats)s WHERE `id`=%(id)s", { "stats": stats, "id": subvol_id } ) self.stopTimer('set_stats') return cur.rowcount def root_diff_time(self, subvol_id, rtime=None): self.startTimer() if rtime is None: rtime = time() cur = self.getCursor() cur.execute( "UPDATE `%s` " % self.getName()+ " SET `root_diff_at`=%(rtime)s WHERE `id`=%(id)s", { "rtime": int(rtime), "id": subvol_id } ) self.stopTimer('stats_time') return cur.rowcount def set_root_diff(self, subvol_id, root_diff): self.startTimer() cur = self.getCursor() cur.execute( "UPDATE `%s` " % self.getName()+ " SET `root_diff`=%(rdiff)s WHERE `id`=%(id)s", { "rdiff": root_diff, "id": subvol_id } ) 
self.stopTimer('set_stats') return cur.rowcount def delete(self, subvol_id): self.startTimer() cur = self.getCursor() cur.execute( "DELETE FROM `%s` " % self.getName()+ " WHERE `id`=%(id)s", { "id": subvol_id } ) item = cur.rowcount self.stopTimer('delete') return item def get(self, subvol_id): self.startTimer() cur = self.getCursor() cur.execute( "SELECT * FROM `%s` " % self.getName()+ " WHERE `id`=%(id)s", { "id": int(subvol_id) } ) item = cur.fetchone() self.stopTimer('get') return item def find(self, name): self.startTimer() cur = self.getCursor() digest = hashlib.new('md5', name).digest() cur.execute( "SELECT * FROM `%s` " % self.getName()+ " WHERE `hash`=X%(hash)s", { "hash": digest.hex() } ) item = cur.fetchone() self.stopTimer('find') return item def get_ids(self, order_by=None, order_dir="ASC"): self.startTimer() cur = self.getCursor() sql = "SELECT id FROM `%s`" % self.getName() if order_by: sql += " ORDER BY `%s` %s" % (order_by, order_dir,) cur.execute(sql) items = (item["id"] for item in cur.fetchall()) self.stopTimer('get_ids') return items pass ```
[ { "content": "Recreate the original code text:\n```python\n# -*- coding: utf-8 -*-\n#\n# Tolk documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 30 13:15:07 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possi...
[ { "content": "Recreate the original code text:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Tolk documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 30 13:15:07 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note th...
```python # -*- coding: utf-8 -*- # # Tolk documentation build configuration file, created by # sphinx-quickstart on Wed Sep 30 13:15:07 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('../../')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Tolk' copyright = u'2015, Auke Willem Oosterhoff <auke@orangetux.nl>' author = u'Auke Willem Oosterhoff <auke@orangetux.nl>' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2.1' # The full version, including alpha/beta/rc tags. 
release = '0.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
#html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { 'index': ['globaltoc.html', 'searchbox.html'], '**': ['globaltoc.html', 'relations.html', 'searchbox.html'] } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Tolkdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Tolk.tex', u'Tolk Documentation', u'Auke Willem Oosterhoff \\textless{}auke@orangetux.nl\\textgreater{}', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'tolk', u'Tolk Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Tolk', u'Tolk Documentation', author, 'Tolk', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False ```
[ { "content": "Provide an exact copy of the source code:\n```python\n\"\"\"\nProvide container class for gas chemistry data.\n\"\"\"\nimport hashlib\nimport warnings\n\nimport numpy as np\nimport tables\nfrom tables.exceptions import NoSuchNodeError, NodeError\n\nfrom spectroscopy.class_factory import ResourceId...
[ { "content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n\"\"\"\nProvide container class for gas chemistry data.\n\"\"\"\nimport hashlib\nimport warnings\n\nimport numpy as np\nimport tables\nfrom tables.exceptions import NoSuchNodeError, NodeError\n\nfrom spectroscopy.class_factory i...
```python """ Provide container class for gas chemistry data. """ import hashlib import warnings import numpy as np import tables from tables.exceptions import NoSuchNodeError, NodeError from spectroscopy.class_factory import ResourceIdentifier from spectroscopy.plugins import get_registered_plugins from spectroscopy import datamodel class Dataset(object): """ This class is a container for all data describing a spectroscopy analysis from the raw measurements, over instruments and information on gas plumes to the final gas flux results. :type preferredFluxIDs: list :param preferredFluxIDs: IDs of the best/final flux estimate. As a dataset can contain analyses from different targets, there can be more than one preferred flux estimate. :type spectra: list :param spectra: List of all spectra that are part of the dataset. :type instruments: list :param instruments: List of all instruments that are part of the dataset. :type retrievals: list :param retrievals: List of all retrievals that are part of the dataset. :type plumevelocities: list :param plumevelocities: List of all plume velocities that are part of the dataset. :type targets: list :param targets: List of all target plumes that are part of the dataset. :type flux: list :param flux: List of all flux estimates that are part of the dataset. """ def __init__(self, filename, mode): self.elements = {} self.base_elements = {} for _c in datamodel.all_classes: name = _c.__name__.strip('_') self.elements[name] = [] self.base_elements[name+'Buffer'] = _c self._rids = {} self._f = tables.open_file(filename, mode) # Create an array of sha224 hash values; when # opening an existing file this will throw an # exception try: self._f.create_earray('/', 'hash', tables.StringAtom(itemsize=28), (0,)) except NodeError: pass def __del__(self): self._f.close() def __add__(self, other): msg = "__add__ is undefined as the return value would " msg += "be a new hdf5 file with unknown filename." 
raise AttributeError(msg) def __iadd__(self, other): if self._f == other._f: raise ValueError("You can't add a dataset to itself.") update_refs = [] rid_dict = {} for _e in list(other.elements.keys()): for _k in other.elements[_e]: ne = self._copy_children(_k, copyuserattrs=True) self.elements[_e].append(ne) update_refs.append(ne) rid_dict[str(_k._resource_id)] = str(ne._resource_id) for ne in update_refs: for _k, prop_type in ne._references.items(): ref = getattr(ne._root._v_attrs, _k, None) if ref is not None: if prop_type[0] == np.ndarray: newentry = [] for iref in ref: newentry.append(rid_dict[iref.decode('ascii')] .encode('ascii')) ne._root._v_attrs[_k] = np.array(newentry) else: ne._root._v_attrs[_k] = (rid_dict[ref.decode('ascii')] .encode('ascii')) return self def _newdst_group(self, dstgroup, title='', filters=None): """ Create the destination group in a new HDF5 file. """ group = self._f.root # Now, create the new group. This works even if dstgroup == '/' for nodename in dstgroup.split('/'): if nodename == '': continue # First try if possible intermediate groups already exist. try: group2 = self._f.get_node(group, nodename) except NoSuchNodeError: # The group does not exist. Create it. 
with warnings.catch_warnings(): warnings.simplefilter('ignore') group2 = self._f.create_group(group, nodename, title=title, filters=filters) group = group2 return group def _copy_children(self, src, title='', recursive=True, filters=None, copyuserattrs=False, overwrtnodes=False): """ Copy the children from source group to destination group """ srcgroup = src._root # assign a new resource ID so that both objects can # be referred to within the same session dstgroup = srcgroup._v_parent._v_pathname+'/' dstgroup += str(ResourceIdentifier()) # Create the new group dstgroup = self._newdst_group(dstgroup, title, filters) # Copy the attributes to dstgroup, if needed if copyuserattrs: srcgroup._v_attrs._f_copy(dstgroup) # Finally, copy srcgroup children to dstgroup try: srcgroup._f_copy_children( dstgroup, recursive=recursive, filters=filters, copyuserattrs=copyuserattrs, overwrite=overwrtnodes) except: msg = "Problems doing the copy of '{:s}'.".format(dstgroup) msg += "Please check that the node names are not " msg += "duplicated in destination, and if so, enable " msg += "overwriting nodes if desired." raise RuntimeError(msg) return type(src)(dstgroup) def new(self, data_buffer, pedantic=False): """ Create a new entry in the HDF5 file from the given data buffer. """ if pedantic: s = hashlib.sha224() # If data buffer is empty raise an exception empty = True for k, v in list(data_buffer.__dict__.items()): if k == 'tags': continue if v is not None: if k in data_buffer._properties: s.update('{}'.format(v).encode('utf-8')) empty = False if empty: msg = "You can't add empty buffers if 'pedantic=True'." 
raise ValueError(msg) _C = self.base_elements[type(data_buffer).__name__] group_name = _C.__name__.strip('_') rid = ResourceIdentifier() try: self._f.create_group('/', group_name) except tables.NodeError: pass with warnings.catch_warnings(): warnings.simplefilter('ignore') group = self._f.create_group('/'+group_name, str(rid)) e = _C(group, data_buffer, pedantic=pedantic) self.elements[group_name].append(e) return e def read(self, filename, ftype, **kwargs): """ Read in a datafile. """ plugins = get_registered_plugins() pg = plugins[ftype.lower()]() return pg.read(self, filename, **kwargs) @staticmethod def open(filename): """ Open an existing HDF5 file. """ dnew = Dataset(filename, 'r+') for group in dnew._f.walk_groups('/'): if group._v_name is '/' or group._v_name+'Buffer' \ not in dnew.base_elements: continue for sgroup in list(group._v_groups.keys()): _C = dnew.base_elements[group._v_name+'Buffer'] e = _C(group._v_groups[sgroup]) dnew.elements[group._v_name].append(e) return dnew def close(self): """ Close the HDF5 file and clear the ResourceIdentifiers. """ for g in self.elements: for e in self.elements[g]: del e._resource_id self._f.close() def register_tags(self, tags): """ Register one or more tag names. """ try: self._f.create_group('/', 'tags') except NodeError: pass for tag in tags: try: self._f.create_earray('/tags', tag, tables.StringAtom(itemsize=60), (0,)) except NodeError: msg = "Tag '{:s}' has already been registered".format(tag) raise ValueError(msg) def remove_tags(self, tags): """ Remove one or more tag names. This will also remove the tag from every element that had been tagged. 
""" for tag in tags: try: ea = self._f.root.tags._v_children[tag] for rid in ea[:]: e = (ResourceIdentifier(rid.astype(np.str_)) .get_referred_object()) e.tags.remove(tag) except (KeyError, NoSuchNodeError): msg = "Can't remove tag {} as it doesn't exist.".format(tag) warnings.warn(msg) def select(self, *args, **kargs): """ Find a subset of events based on given select rules. """ try: etype = kargs['etype'] except KeyError: etype = False if etype: # retval = [] for _e in self.elements[etype]: pass if __name__ == '__main__': import doctest doctest.testmod(exclude_empty=True) ```
[ { "content": "Replicate the source code:\n```python\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under ...
[ { "content": "Replicate the source code:\n<|memory_start|>```python\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this fi...
```python # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys if sys.version >= '3': basestring = unicode = str from py4j.java_gateway import JavaClass from pyspark import RDD, since from pyspark.rdd import ignore_unicode_prefix from pyspark.sql.column import _to_seq from pyspark.sql.types import * from pyspark.sql import utils __all__ = ["DataFrameReader", "DataFrameWriter"] def to_str(value): """ A wrapper over str(), but converts bool values to lower case strings. If None is given, just returns None, instead of converting it to string "None". """ if isinstance(value, bool): return str(value).lower() elif value is None: return value else: return str(value) class OptionUtils(object): def _set_opts(self, schema=None, **options): """ Set named options (filter out those the value is None) """ if schema is not None: self.schema(schema) for k, v in options.items(): if v is not None: self.option(k, v) class DataFrameReader(OptionUtils): """ Interface used to load a :class:`DataFrame` from external storage systems (e.g. file systems, key-value stores, etc). Use :func:`spark.read` to access this. .. 
versionadded:: 1.4 """ def __init__(self, spark): self._jreader = spark._ssql_ctx.read() self._spark = spark def _df(self, jdf): from pyspark.sql.dataframe import DataFrame return DataFrame(jdf, self._spark) @since(1.4) def format(self, source): """Specifies the input data source format. :param source: string, name of the data source, e.g. 'json', 'parquet'. >>> df = spark.read.format('json').load('python/test_support/sql/people.json') >>> df.dtypes [('age', 'bigint'), ('name', 'string')] """ self._jreader = self._jreader.format(source) return self @since(1.4) def schema(self, schema): """Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema automatically from data. By specifying the schema here, the underlying data source can skip the schema inference step, and thus speed up data loading. :param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). >>> s = spark.read.schema("col0 INT, col1 DOUBLE") """ from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() if isinstance(schema, StructType): jschema = spark._jsparkSession.parseDataType(schema.json()) self._jreader = self._jreader.schema(jschema) elif isinstance(schema, basestring): self._jreader = self._jreader.schema(schema) else: raise TypeError("schema should be StructType or string") return self @since(1.5) def option(self, key, value): """Adds an input option for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone. """ self._jreader = self._jreader.option(key, to_str(value)) return self @since(1.4) def options(self, **options): """Adds input options for the underlying data source. 
You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone. """ for k in options: self._jreader = self._jreader.option(k, to_str(options[k])) return self @since(1.4) def load(self, path=None, format=None, schema=None, **options): """Loads data from a data source and returns it as a :class`DataFrame`. :param path: optional string or a list of string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> df = spark.read.format("parquet").load('python/test_support/sql/parquet_partitioned', ... opt1=True, opt2=1, opt3='str') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] >>> df = spark.read.format('json').load(['python/test_support/sql/people.json', ... 
'python/test_support/sql/people1.json']) >>> df.dtypes [('age', 'bigint'), ('aka', 'string'), ('name', 'string')] """ if format is not None: self.format(format) if schema is not None: self.schema(schema) self.options(**options) if isinstance(path, basestring): return self._df(self._jreader.load(path)) elif path is not None: if type(path) != list: path = [path] return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path))) else: return self._df(self._jreader.load()) @since(1.4) def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None, encoding=None): """ Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. 
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets other \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to timestamp type. 
If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')] """ self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, samplingRatio=samplingRatio, encoding=encoding) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif isinstance(path, RDD): def func(iterator): for x 
in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) return self._df(self._jreader.json(jrdd)) else: raise TypeError("path can be only string, list or RDD") @since(1.4) def table(self, tableName): """Returns the specified table as a :class:`DataFrame`. :param tableName: string, name of the table. >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned') >>> df.createOrReplaceTempView('tmpTable') >>> spark.read.table('tmpTable').dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] """ return self._df(self._jreader.table(tableName)) @since(1.4) def parquet(self, *paths): """Loads Parquet files, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] """ return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths))) @ignore_unicode_prefix @since(1.6) def text(self, paths, wholetext=False, lineSep=None): """ Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. 
If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')] """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(paths, basestring): paths = [paths] return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths))) @since(2.0) def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, samplingRatio=None, enforceSchema=None): """Loads a CSV file and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. :param path: string, or list of strings, for input path(s), or RDD of Strings storing CSV rows. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. 
If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. 
:param positiveInf: sets the string representation of a positive infinity value. If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets other \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. 
* ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse records, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise. :param samplingRatio: defines fraction of rows used for schema inferring. If None is set, it uses the default value, ``1.0``. >>> df = spark.read.csv('python/test_support/sql/ages.csv') >>> df.dtypes [('_c0', 'string'), ('_c1', 'string')] >>> rdd = sc.textFile('python/test_support/sql/ages.csv') >>> df2 = spark.read.csv(rdd) >>> df2.dtypes [('_c0', 'string'), ('_c1', 'string')] """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, samplingRatio=samplingRatio, enforceSchema=enforceSchema) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif 
isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) # see SPARK-22112 # There aren't any jvm api for creating a dataframe from rdd storing csv. # We can do it through creating a jvm dataset firstly and using the jvm api # for creating a dataframe from dataset storing csv. jdataset = self._spark._ssql_ctx.createDataset( jrdd.rdd(), self._spark._jvm.Encoders.STRING()) return self._df(self._jreader.csv(jdataset)) else: raise TypeError("path can be only string, list or RDD") @since(1.5) def orc(self, path): """Loads ORC files, returning the result as a :class:`DataFrame`. .. note:: Currently ORC support is only available together with Hive support. >>> df = spark.read.orc('python/test_support/sql/orc_partitioned') >>> df.dtypes [('a', 'bigint'), ('b', 'int'), ('c', 'int')] """ if isinstance(path, basestring): path = [path] return self._df(self._jreader.orc(_to_seq(self._spark._sc, path))) @since(1.4) def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None, predicates=None, properties=None): """ Construct a :class:`DataFrame` representing the database table named ``table`` accessible via JDBC URL ``url`` and connection ``properties``. Partitions of the table will be retrieved in parallel if either ``column`` or ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions`` is needed when ``column`` is specified. If both ``column`` and ``predicates`` are specified, ``column`` will be used. .. note:: Don't create too many partitions in parallel on a large cluster; \ otherwise Spark might crash your external database systems. 
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: the name of the table :param column: the name of an integer column that will be used for partitioning; if this parameter is specified, then ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound`` (exclusive) will form partition strides for generated WHERE clause expressions used to split the column ``column`` evenly :param lowerBound: the minimum value of ``column`` used to decide partition stride :param upperBound: the maximum value of ``column`` used to decide partition stride :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) if column is not None: assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified" assert upperBound is not None, "upperBound can not be None when ``column`` is specified" assert numPartitions is not None, \ "numPartitions can not be None when ``column`` is specified" return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound), int(numPartitions), jprop)) if predicates is not None: gateway = self._spark._sc._gateway jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates) return self._df(self._jreader.jdbc(url, table, jpredicates, jprop)) return self._df(self._jreader.jdbc(url, table, jprop)) class DataFrameWriter(OptionUtils): """ Interface used to write a :class:`DataFrame` to external storage systems (e.g. 
file systems, key-value stores, etc). Use :func:`DataFrame.write` to access this. .. versionadded:: 1.4 """ def __init__(self, df): self._df = df self._spark = df.sql_ctx self._jwrite = df._jdf.write() def _sq(self, jsq): from pyspark.sql.streaming import StreamingQuery return StreamingQuery(jsq) @since(1.4) def mode(self, saveMode): """Specifies the behavior when data or table already exists. Options include: * `append`: Append contents of this :class:`DataFrame` to existing data. * `overwrite`: Overwrite existing data. * `error` or `errorifexists`: Throw an exception if data already exists. * `ignore`: Silently ignore this operation if data already exists. >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ # At the JVM side, the default value of mode is already set to "error". # So, if the given saveMode is None, we will not call JVM-side's mode method. if saveMode is not None: self._jwrite = self._jwrite.mode(saveMode) return self @since(1.4) def format(self, source): """Specifies the underlying output data source. :param source: string, name of the data source, e.g. 'json', 'parquet'. >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data')) """ self._jwrite = self._jwrite.format(source) return self @since(1.5) def option(self, key, value): """Adds an output option for the underlying data source. You can set the following option(s) for writing files: * ``timeZone``: sets the string that indicates a timezone to be used to format timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone. """ self._jwrite = self._jwrite.option(key, to_str(value)) return self @since(1.4) def options(self, **options): """Adds output options for the underlying data source. You can set the following option(s) for writing files: * ``timeZone``: sets the string that indicates a timezone to be used to format timestamps in the JSON/CSV datasources or partition values. 
If it isn't set, it uses the default value, session local timezone. """ for k in options: self._jwrite = self._jwrite.option(k, to_str(options[k])) return self @since(1.4) def partitionBy(self, *cols): """Partitions the output by the given columns on the file system. If specified, the output is laid out on the file system similar to Hive's partitioning scheme. :param cols: name of columns >>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ if len(cols) == 1 and isinstance(cols[0], (list, tuple)): cols = cols[0] self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols)) return self @since(2.3) def bucketBy(self, numBuckets, col, *cols): """Buckets the output by the given columns.If specified, the output is laid out on the file system similar to Hive's bucketing scheme. :param numBuckets: the number of buckets to save :param col: a name of a column, or a list of names. :param cols: additional names (optional). If `col` is a list it should be empty. .. note:: Applicable for file-based data sources in combination with :py:meth:`DataFrameWriter.saveAsTable`. >>> (df.write.format('parquet') # doctest: +SKIP ... .bucketBy(100, 'year', 'month') ... .mode("overwrite") ... .saveAsTable('bucketed_table')) """ if not isinstance(numBuckets, int): raise TypeError("numBuckets should be an int, got {0}.".format(type(numBuckets))) if isinstance(col, (list, tuple)): if cols: raise ValueError("col is a {0} but cols are not empty".format(type(col))) col, cols = col[0], col[1:] if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)): raise TypeError("all names should be `str`") self._jwrite = self._jwrite.bucketBy(numBuckets, col, _to_seq(self._spark._sc, cols)) return self @since(2.3) def sortBy(self, col, *cols): """Sorts the output in each bucket by the given columns on the file system. :param col: a name of a column, or a list of names. :param cols: additional names (optional). 
If `col` is a list it should be empty. >>> (df.write.format('parquet') # doctest: +SKIP ... .bucketBy(100, 'year', 'month') ... .sortBy('day') ... .mode("overwrite") ... .saveAsTable('sorted_bucketed_table')) """ if isinstance(col, (list, tuple)): if cols: raise ValueError("col is a {0} but cols are not empty".format(type(col))) col, cols = col[0], col[1:] if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)): raise TypeError("all names should be `str`") self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols)) return self @since(1.4) def save(self, path=None, format=None, mode=None, partitionBy=None, **options): """Saves the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. :param path: the path in a Hadoop supported file system :param format: the format used to save :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param options: all other string options >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode).options(**options) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) if path is None: self._jwrite.save() else: self._jwrite.save(path) @since(1.4) def insertInto(self, tableName, overwrite=False): """Inserts the content of the :class:`DataFrame` to the specified table. It requires that the schema of the class:`DataFrame` is the same as the schema of the table. 
Optionally overwriting any existing data. """ self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName) @since(1.4) def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options): """Saves the content of the :class:`DataFrame` as the specified table. In the case the table already exists, behavior of this function depends on the save mode, specified by the `mode` function (default to throwing an exception). When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be the same as that of the existing table. * `append`: Append contents of this :class:`DataFrame` to existing data. * `overwrite`: Overwrite existing data. * `error` or `errorifexists`: Throw an exception if data already exists. * `ignore`: Silently ignore this operation if data already exists. :param name: the table name :param format: the format used to save :param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \ (default: error) :param partitionBy: names of partitioning columns :param options: all other string options """ self.mode(mode).options(**options) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) self._jwrite.saveAsTable(name) @since(1.4) def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None, lineSep=None, encoding=None): """Saves the content of the :class:`DataFrame` in JSON format (`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. 
:param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param encoding: specifies encoding (charset) of saved json files. If None is set, the default UTF-8 charset will be used. :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts( compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat, lineSep=lineSep, encoding=encoding) self._jwrite.json(path) @since(1.4) def parquet(self, path, mode=None, partitionBy=None, compression=None): """Saves the content of the :class:`DataFrame` in Parquet format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, snappy, gzip, and lzo). 
This will override ``spark.sql.parquet.compression.codec``. If None is set, it uses the value specified in ``spark.sql.parquet.compression.codec``. >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) if partitionBy is not None: self.partitionBy(partitionBy) self._set_opts(compression=compression) self._jwrite.parquet(path) @since(1.6) def text(self, path, compression=None, lineSep=None): """Saves the content of the DataFrame in a text file at the specified path. :param path: the path in any Hadoop supported file system :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. The DataFrame must have only one column that is of string type. Each row becomes a new line in the output file. """ self._set_opts(compression=compression, lineSep=lineSep) self._jwrite.text(path) @since(2.0) def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None, header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None, timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, charToEscapeQuoteEscaping=None): """Saves the content of the :class:`DataFrame` in CSV format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. 
This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If an empty string is set, it uses ``u0000`` (null character). :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\`` :param escapeQuotes: a flag indicating whether values containing quotes should always be enclosed in quotes. If None is set, it uses the default value ``true``, escaping all values containing a quote character. :param quoteAll: a flag indicating whether all values should always be enclosed in quotes. If None is set, it uses the default value ``false``, only escaping values containing a quote character. :param header: writes the names of columns as the first line. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.text.SimpleDateFormat``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. 
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header, nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll, dateFormat=dateFormat, timestampFormat=timestampFormat, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping) self._jwrite.csv(path) @since(1.5) def orc(self, path, mode=None, partitionBy=None, compression=None): """Saves the content of the :class:`DataFrame` in ORC format at the specified path. .. note:: Currently ORC support is only available together with Hive support. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, snappy, zlib, and lzo). This will override ``orc.compress`` and ``spark.sql.orc.compression.codec``. If None is set, it uses the value specified in ``spark.sql.orc.compression.codec``. 
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned') >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) if partitionBy is not None: self.partitionBy(partitionBy) self._set_opts(compression=compression) self._jwrite.orc(path) @since(1.4) def jdbc(self, url, table, mode=None, properties=None): """Saves the content of the :class:`DataFrame` to an external database table via JDBC. .. note:: Don't create too many partitions in parallel on a large cluster; \ otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: Name of the table in the external database. :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. 
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) self.mode(mode)._jwrite.jdbc(url, table, jprop) def _test(): import doctest import os import tempfile import py4j from pyspark.context import SparkContext from pyspark.sql import SparkSession, Row import pyspark.sql.readwriter os.chdir(os.environ["SPARK_HOME"]) globs = pyspark.sql.readwriter.__dict__.copy() sc = SparkContext('local[4]', 'PythonTest') try: spark = SparkSession.builder.getOrCreate() except py4j.protocol.Py4JError: spark = SparkSession(sc) globs['tempfile'] = tempfile globs['os'] = os globs['sc'] = sc globs['spark'] = spark globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned') (failure_count, test_count) = doctest.testmod( pyspark.sql.readwriter, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF) sc.stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test() ```
[ { "content": "```python\n\"\"\"\nWSGI config for Creamas project.\n\nThis module contains the WSGI application used by Django's development server\nand any production WSGI deployments. It should expose a module-level variable\nnamed ``application``. Django's ``runserver`` and ``runfcgi`` commands discover\nthis...
[ { "content": "<|memory_start|>```python\n\"\"\"\nWSGI config for Creamas project.\n\nThis module contains the WSGI application used by Django's development server\nand any production WSGI deployments. It should expose a module-level variable\nnamed ``application``. Django's ``runserver`` and ``runfcgi`` command...
```python """ WSGI config for Creamas project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Creamas.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. #application = get_wsgi_application() from django.core.wsgi import get_wsgi_application from dj_static import Cling application = Cling(get_wsgi_application()) # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application) ```
[ { "content": "Repeat the following code:\n```python\nimport os\nimport urlparse\nimport subprocess\nimport smtplib\nimport getpass\nimport re\nfrom email.mime.text import MIMEText\n\nimport pyrax\n\n\nenv_file = '/home/ubuntu/blimp.env'\n\n\ndef run():\n print('---> Getting latest backup of {}...'.format(her...
[ { "content": "Repeat the following code:\n<|memory_start|>```python\nimport os\nimport urlparse\nimport subprocess\nimport smtplib\nimport getpass\nimport re\nfrom email.mime.text import MIMEText\n\nimport pyrax\n\n\nenv_file = '/home/ubuntu/blimp.env'\n\n\ndef run():\n print('---> Getting latest backup of {...
```python import os import urlparse import subprocess import smtplib import getpass import re from email.mime.text import MIMEText import pyrax env_file = '/home/ubuntu/blimp.env' def run(): print('---> Getting latest backup of {}...'.format(heroku_app_name)) latest_backup_url = subprocess.check_output( ['heroku', 'pgbackups:url', '-a', heroku_app_name]).strip() file_name = os.path.basename(urlparse.urlparse(latest_backup_url).path) backup_path = '{}/{}'.format(backups_directory, file_name) pyrax.settings.set('identity_type', 'rackspace') pyrax.set_credentials(rackspace_username, rackspace_api_key) cf = pyrax.cloudfiles cont_name = '{}-backups'.format(heroku_app_name) cont = cf.create_container(cont_name) try: print('---> Checking if {} already exists...'.format(file_name)) cont.get_object(file_name) print('---> {} is already backed up...'.format(file_name)) except pyrax.exceptions.NoSuchObject: print('---> Downloading {}...'.format(file_name)) subprocess.call(['curl', '-o', backup_path, latest_backup_url]) try: print('---> Verifying {}...'.format(file_name)) subprocess.call([ 'pg_restore', '--clean', '--no-acl', '--no-owner', '--username', db_username, '--dbname', db_name, '--schema', 'public', backup_path ]) print('---> Uploading {}...'.format(file_name)) cf.upload_file(cont, open(backup_path), obj_name=file_name) msg = 'Just verified and backed up {}'.format(file_name) print('---> {}...'.format(msg)) except Exception as e: send_email('Backup failed', str(e)) print('---> Deleting local backup file {}...'.format(file_name)) subprocess.call(['rm', backup_path]) print('---> Done!') def send_email(subject, error_message): server = smtplib.SMTP('smtp.gmail.com', 587) server.starttls() server.login(email, password) msg = MIMEText(error_message) msg['From'] = email msg['To'] = recipient_email msg['Subject'] = subject server.sendmail(email, [msg['To']], msg.as_string()) server.quit() def set_env(): try: with open(env_file) as f: content = f.read() except IOError: content 
= '' for line in content.splitlines(): m1 = re.match(r'\Aexport ([A-Za-z_0-9]+)=(.*)\Z', line) if m1: key, val = m1.group(1), m1.group(2) m2 = re.match(r"\A'(.*)'\Z", val) if m2: val = m2.group(1) m3 = re.match(r'\A"(.*)"\Z', val) if m3: val = re.sub(r'\\(.)', r'\1', m3.group(1)) os.environ.setdefault(key, val) if __name__ == '__main__': set_env() heroku_app_name = os.environ['HEROKU_APP_NAME'] rackspace_username = os.environ['RACKSPACE_USERNAME'] rackspace_api_key = os.environ['RACKSPACE_API_KEY'] backups_directory = os.path.dirname(os.path.realpath(__file__)) email = os.environ['BACKUP_EMAIL'] password = os.environ['BACKUP_EMAIL_PASSWORD'] recipient_email = os.environ['BACKUP_RECIPIENT_EMAIL'] db_username = getpass.getuser() db_name = db_username run() ```
[ { "content": "Reproduce the code exactly as provided (keep formatting):\n```python\n#!/usr/bin/python\n\n#run as array job\n#if files are bzipped -> copy to local disk and unzip there\n\n\nimport os,sys\n\ndb_dir = sys.argv[1]\n\nzipped = sys.argv[2] #are files bzipped?\n\nif zipped == 'True':\n\tzipped = True\...
[ { "content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n#!/usr/bin/python\n\n#run as array job\n#if files are bzipped -> copy to local disk and unzip there\n\n\nimport os,sys\n\ndb_dir = sys.argv[1]\n\nzipped = sys.argv[2] #are files bzipped?\n\nif zipped == 'True':\n...
```python #!/usr/bin/python #run as array job #if files are bzipped -> copy to local disk and unzip there import os,sys db_dir = sys.argv[1] zipped = sys.argv[2] #are files bzipped? if zipped == 'True': zipped = True else: zipped = False files = os.listdir(db_dir) file = '#$ -S /bin/tcsh\n#$ -cwd\n#$ -V\n' path = os.getcwd() counter = 1 for i in files: #set up all sub dirs with correct input files if i[-2:] == 'db' or 'z2': sub_dir = 'acd_' + str(counter) if not os.path.exists(sub_dir): os.system('mkdir ' + sub_dir) os.chdir(path + '/' + sub_dir) os.system('cp ../INDOCK .') command = 'ln -s ' + db_dir + i + ' db_file' print command os.system(command) counter = counter + 1 os.chdir('..') #create file to submit array job start_file = open('start_dock.bin', 'w') start_file.write(file) start_file.write('cd acd_$SGE_TASK_ID\n') if zipped: #files must be unzipped, to save diskspace to do this on temporary cluster disk, $TMPDIR start_file.write('ls -larth *\n') #save name of db file that should be docked start_file.write('cp db_file $TMPDIR/db_file.db.bz2\n') start_file.write('bunzip2 $TMPDIR/db_file.db.bz2\n') start_file.write('unlink db_file\n') start_file.write('ln -s $TMPDIR/db_file.db db_file\n') start_file.write('/software/dockenv/bin/Linux/dock_vol.test\n') if zipped: start_file.write('unlink db_file\n') start_file.write('rm -f *.1') start_file.close() os.system('chmod 755 start_dock.bin') os.system('qsub -q 64bit-pri.q,64bit.q -t 1-' + str(counter-1) + ' start_dock.bin') ```
[ { "content": "```python\nfrom random import uniform\nimport numpy\n\ndef sigmoidFunction(x):\n try:\n ret = 1 / (1 + numpy.exp(-x))\n except OverflowError:\n ret = 0\n return ret\n\ndef softMax(array):\n exp = [numpy.exp(x) for x in array]\n return numpy.array([x / sum(exp) for x in...
[ { "content": "<|memory_start|>```python\nfrom random import uniform\nimport numpy\n\ndef sigmoidFunction(x):\n try:\n ret = 1 / (1 + numpy.exp(-x))\n except OverflowError:\n ret = 0\n return ret\n\ndef softMax(array):\n exp = [numpy.exp(x) for x in array]\n return numpy.array([x / s...
```python from random import uniform import numpy def sigmoidFunction(x): try: ret = 1 / (1 + numpy.exp(-x)) except OverflowError: ret = 0 return ret def softMax(array): exp = [numpy.exp(x) for x in array] return numpy.array([x / sum(exp) for x in exp]) global sigmoid sigmoid = numpy.vectorize(sigmoidFunction) class FastNetwork: def __init__(self, rowSizes, learningRate=1, softmax=True): self.weights = list() self.learningRate = learningRate self.softmax = softmax #Rates initialized according to: #http://datascience.stackexchange.com/questions/10926/how-to-deep-neural-network-weight-initialization for i in range(len(rowSizes)): if i == 0: r = numpy.sqrt(6 / (1 + rowSizes[i + 1])) elif i == len(rowSizes) - 1: r = numpy.sqrt(6 / (rowSizes[i - 1] + 1)) else: r = numpy.sqrt(6 / (rowSizes[i - 1] + rowSizes[i + 1])) if i < len(rowSizes) - 1: tempArray = numpy.array([uniform(-r, r) for x in range(rowSizes[i]*(rowSizes[i+1] + 1))]) tempArray = numpy.reshape(tempArray, (rowSizes[i], rowSizes[i+1] + 1)) else: tempArray = numpy.array([uniform(-r, r) for x in range(rowSizes[i]*2)]) tempArray = numpy.reshape(tempArray, (rowSizes[i], 2)) self.weights.append(tempArray) def startFromFileOld(self, filename): #Open files compatible with network.py's Network with open(filename) as weightFile: rows = weightFile.readlines() rowData = [numpy.fromstring(row.strip()[2:-1], sep=' ') for row in rows] assert len(rowData) == sum(matrix.shape[0] for matrix in self.weights) for i in range(len(self.weights)): size = self.weights[i].shape[0] length = self.weights[i].shape[1] assert all([len(row) == length for row in rowData[:size]]) newArray = numpy.stack(rowData[0:size]) self.weights[i] = newArray rowData = rowData[size:] def startFromFile(self, filename): #Open files and overwrite weights with open(filename) as weightFile: weightStrings = weightFile.readlines() assert len(weightStrings) == len(self.weights) for i in range(len(weightStrings)): weightString = weightStrings[i].strip() weightArray 
= numpy.fromstring(weightString[2:-1], sep=' ') assert weightArray.size == self.weights[i].size weightArray = numpy.reshape(weightArray, self.weights[i].shape) self.weights[i] = weightArray def writeToFile(self, filename): #Write all of the weights data to file with open(filename, 'w') as weightFile: numpy.set_printoptions(threshold = numpy.inf, linewidth = numpy.inf) for matrix in self.weights: printable = numpy.reshape(matrix, (numpy.product(matrix.shape))) weightFile.write(numpy.array_str(printable) + "\n") numpy.set_printoptions(threshold = 10, linewidth = 75) def networkOutputs(self, inputs): #Calculate the outputs for each row in the neural network assert len(inputs) == self.weights[len(self.weights) - 1].shape[0] outputs = list() for i in reversed(range(len(self.weights))): #Input Layer if i == len(self.weights) - 1: inputArray = numpy.array(inputs) inputArray = numpy.reshape(inputArray, (len(inputs), 1)) onesArray = numpy.ones((len(inputs), 1)) inputArray = numpy.concatenate((inputArray, onesArray), axis=1) #Row-wise dot product of inputs and weights output = numpy.einsum('ij, ij->i', self.weights[i], inputArray) output = sigmoid(output) outputs.append(output) #Otherwise else: inputArray = numpy.array(numpy.concatenate((outputs[0], [1]))) #Matrix multiplication of weights and input vector output = self.weights[i] @ inputArray if i == 0 and self.softmax: output = softMax(output) else: output = sigmoid(output) outputs.insert(0, output) return outputs def classify(self, inputs): #Return the most probable output outputs = self.networkOutputs(inputs) maxValue = max(outputs[0]) maxIndex = outputs[0].tolist().index(maxValue) return maxIndex, maxValue def backPropagate(self, inputs, targets): outputs = self.networkOutputs(inputs) targets = numpy.array(targets) inputs = numpy.array(inputs) deltas = list() changes = list() #Back propagate the error for i in range(len(self.weights)): #Output layer error and change if i == 0: if self.softmax: error = targets - 
outputs[i] delta = error else: error = targets - outputs[i] #delta = error * outputs * (1 - outputs) delta = error * outputs[i] * (numpy.ones((self.weights[i].shape[0])) - outputs[i]) deltas.append(delta) change = numpy.outer((self.learningRate * deltas[i]), numpy.array(numpy.concatenate((outputs[i+1], [1])))) changes.append(change) #Input layer error and change elif i == len(self.weights) - 1: error = numpy.dot(deltas[i - 1], self.weights[i - 1][:,:-1]) delta = error * outputs[i] * (numpy.ones((self.weights[i].shape[0])) - outputs[i]) deltas.append(delta) doubleDelta = numpy.stack((delta, delta)) inputArray = numpy.stack((inputs, numpy.ones(self.weights[i].shape[0]))) change = numpy.transpose(doubleDelta * inputArray) changes.append(change) #Hidden layer error and change else: error = numpy.dot(deltas[i - 1], self.weights[i - 1][:,:-1]) delta = error * outputs[i] * (numpy.ones((self.weights[i].shape[0])) - outputs[i]) deltas.append(delta) change = numpy.outer((self.learningRate * deltas[i]), numpy.array(numpy.concatenate((outputs[i+1], [1])))) changes.append(change) #Update the weights matrices for i in range(len(self.weights)): self.weights[i] += changes[i] """ numpy.set_printoptions(threshold = numpy.inf, linewidth = numpy.inf) network = FastNetwork([40,100,161]) #network.writeToFile("test.txt") #network.startFromFile("test.txt") network.startFromFileOld("old.txt") outputs = network.networkOutputs([1]*161) print(outputs[0]) network.backPropagate([1]*161, [0] + [1] + [0]*38) import time start = time.time() for i in range(1000): network.backPropagate([1]*161, [0] + [1] + [0]*38) print(time.time() - start) """ """ print(network.classify([1]*161)) print(network.networkOutputs([1]*161)[0]) """ ```
[ { "content": "```python\n# Written by Nicolas Neubauer, Arno Bakker\r\n# see LICENSE.txt for license information\r\n#\r\n# Test case for BuddyCast overlay version 12 (and 8). To be integrated into\r\n# test_buddycast_msg.py\r\n#\r\n# Very sensitive to the order in which things are put into DB,\r\n# so not a rob...
[ { "content": "<|memory_start|>```python\n# Written by Nicolas Neubauer, Arno Bakker\r\n# see LICENSE.txt for license information\r\n#\r\n# Test case for BuddyCast overlay version 12 (and 8). To be integrated into\r\n# test_buddycast_msg.py\r\n#\r\n# Very sensitive to the order in which things are put into DB,\r...
```python # Written by Nicolas Neubauer, Arno Bakker # see LICENSE.txt for license information # # Test case for BuddyCast overlay version 12 (and 8). To be integrated into # test_buddycast_msg.py # # Very sensitive to the order in which things are put into DB, # so not a robust test import unittest import os import sys import time import tempfile import shutil from sha import sha from random import randint,shuffle from traceback import print_exc from types import StringType, ListType, DictType from threading import Thread from time import sleep from M2Crypto import Rand,EC from Tribler.Test.test_as_server import TestAsServer from olconn import OLConnection from Tribler.__init__ import LIBRARYNAME from Tribler.Core.BitTornado.bencode import bencode,bdecode from Tribler.Core.BitTornado.BT1.MessageID import * from Tribler.Core.CacheDB.CacheDBHandler import BarterCastDBHandler from Tribler.Core.BuddyCast.buddycast import BuddyCastFactory, BuddyCastCore from Tribler.Core.Overlay.SecureOverlay import OLPROTO_VER_FIRST, OLPROTO_VER_SECOND, OLPROTO_VER_THIRD, OLPROTO_VER_FOURTH, OLPROTO_VER_FIFTH, OLPROTO_VER_SIXTH, OLPROTO_VER_SEVENTH, OLPROTO_VER_EIGHTH, OLPROTO_VER_ELEVENTH, OLPROTO_VER_CURRENT, OLPROTO_VER_LOWEST from Tribler.Core.simpledefs import * from Tribler.Core.CacheDB.SqliteCacheDBHandler import * from Tribler.Core.CacheDB.sqlitecachedb import CURRENT_MAIN_DB_VERSION DEBUG=True class TestBuddyCastMsg8Plus(TestAsServer): """ Testing BuddyCast 5 / overlay protocol v12+v8 interactions: swarm size info exchange. """ def setUp(self): """ override TestAsServer """ TestAsServer.setUp(self) Rand.load_file('randpool.dat', -1) def setUpPreSession(self): """ override TestAsServer """ TestAsServer.setUpPreSession(self) # Enable buddycast self.config.set_buddycast(True) BuddyCastCore.TESTASSERVER = True self.config.set_start_recommender(True) self.config.set_bartercast(True) # Arno, 2010-02-02: Install empty superpeers.txt so no interference from # real BuddyCast. 
self.config.set_crawler(False) # Write superpeers.txt self.install_path = tempfile.mkdtemp() spdir = os.path.join(self.install_path, LIBRARYNAME, 'Core') os.makedirs(spdir) statsdir = os.path.join(self.install_path, LIBRARYNAME, 'Core', 'Statistics') os.makedirs(statsdir) superpeerfilename = os.path.join(spdir, 'superpeer.txt') print >> sys.stderr,"test: writing empty superpeers to",superpeerfilename f = open(superpeerfilename, "w") f.write('# Leeg') f.close() self.config.set_install_dir(self.install_path) srcfiles = [] srcfiles.append(os.path.join(LIBRARYNAME,"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql")) for srcfile in srcfiles: sfn = os.path.join('..','..',srcfile) dfn = os.path.join(self.install_path,srcfile) print >>sys.stderr,"test: copying",sfn,dfn shutil.copyfile(sfn,dfn) def setUpPostSession(self): """ override TestAsServer """ TestAsServer.setUpPostSession(self) self.mypermid = str(self.my_keypair.pub().get_der()) self.hispermid = str(self.his_keypair.pub().get_der()) self.myhash = sha(self.mypermid).digest() self.buddycast = BuddyCastFactory.getInstance(superpeer=True) self.buddycast.olthread_register(True) # arg0 = sys.argv[0].lower() # if arg0.endswith('.exe'): # installdir = os.path.abspath(os.path.dirname(sys.argv[0])) # else: # installdir = os.getcwd() # self.utility = Utility(installdir) # wait for buddycast to have completed on run cycle, # seems to create problems otherwise while not self.buddycast.ranonce: pass def tearDown(self): """ override TestAsServer """ TestAsServer.tearDown(self) try: os.remove('randpool.dat') except: pass def singtest_all_olproto_ver_current(self): self._test_all(OLPROTO_VER_CURRENT) def singtest_all_olproto_ver_11(self): self._test_all(11) def singtest_all_olproto_ver_8(self): self._test_all(8) def _test_all(self,myoversion): """ I want to start a Tribler client once and then connect to it many times. So there must be only one test method to prevent setUp() from creating a new client every time. 
The code is constructed so unittest will show the name of the (sub)test where the error occured in the traceback it prints. """ # Arno, 2010-02-03: clicklog 1,2,3 must be run consecutively # create_mypref() must be called after clicklog 1,2,3 self.subtest_good_buddycast_clicklog(1,myoversion) self.subtest_good_buddycast_clicklog(2,myoversion) self.subtest_good_buddycast_clicklog(3,myoversion) self.subtest_terms(myoversion) self.subtest_create_mypref() self.subtest_create_bc(myoversion) def get_good_clicklog_msg(self,n,myoversion=8): if n==1: # OLv8: # infohash # search terms # click position # reranking strategy # OLv11: # number of seeders # number of leechers # age of checking # number of sources seen' prec = ["hash1hash1hash1hash1", ["linux","ubuntu"], 1, 2] if myoversion >= 11: prec += [400, 500, 1000, 50] preferences = [prec] if myoversion >= 11: prec = ['hash0hash0hash0hash0', 300, 800, 5000, 30] collected_torrents = [prec] else: collected_torrents = ['hash0hash0hash0hash0'] elif n==2: prec = ["hash2hash2hash2hash2", ["linux", "ubuntu"], 2, 2] if myoversion >= 11: prec += [600, 700,20000,60] preferences = [prec] if myoversion >= 11: prec = ['hash2hash2hash2hash2', 500, 200, 70000, 8000] collected_torrents = [prec] else: collected_torrents = ["hash2hash2hash2hash2"] elif n==3: prec = ["hash3hash3hash3hash3", ["linux","redhat"], 5 ,2 ] if myoversion >= 11: prec += [800, 900, 30000, 70] preferences = [prec] if myoversion >= 11: prec = ['hash3hash3hash3hash3', 700, 200, 45000, 75] collected_torrents = [prec] else: collected_torrents = ['hash3hash3hash3hash3'] return { 'preferences': preferences, 'ndls': 1, 'permid': self.mypermid, 'ip': '127.0.0.1', #'130.149.146.117', 'taste buddies': [], 'name': 'nic', 'random peers': [], 'collected torrents': collected_torrents, 'nfiles': 0, 'npeers': 0, 'port': self.hisport, 'connectable': 1} def subtest_good_buddycast_clicklog(self, i, myoversion): """sends two buddy cast messages containing clicklog data, then checks in the 
DB to find out whether the correct data was stored. This in fact checks quite a lot of things. For example, the messages always contain terms [1,2] """ print >>sys.stderr,"\ntest: subtest_good_buddycast_clicklog",i,"selversion",myoversion s = OLConnection(self.my_keypair,'localhost',self.hisport,myoversion=myoversion) prefmsg = self.get_good_clicklog_msg(i,myoversion) print >>sys.stderr,myoversion,`prefmsg` msg = self.create_payload(prefmsg) s.send(msg) resp = s.recv() if len(resp)>0: print >>sys.stderr,"test: reply message %s:%s" % (getMessageName(resp[0]), resp[1:]) else: print >>sys.stderr,"no reply message" self.assert_(len(resp) > 0) #if we have survived this, check if the content of the remote database is correct search_db = self.session.open_dbhandler(NTFY_SEARCH) term_db = self.session.open_dbhandler(NTFY_TERM) pref_db = self.session.open_dbhandler(NTFY_PREFERENCES) torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) torrent_id = None while not torrent_id: hash = prefmsg['preferences'][0][0] print >> sys.stderr, "hash: %s, bin2str: %s" % (hash, bin2str(hash)) torrent_data = torrent_db.getTorrentID(hash) print >> sys.stderr, "Torrent data for torrent %s: %s" % (prefmsg['preferences'][0][0], torrent_data) torrent_id = torrent_data if not torrent_id: print >> sys.stderr, "torrent not yet saved, waiting..." sleep(1) # self.getAll("rowid, peer_id, torrent_id, click_position,reranking_strategy", order_by="peer_id, torrent_id") real_prefs = pref_db.getAllEntries() print >>sys.stderr,"test: getAllEntries returned",real_prefs my_peer_id = real_prefs[0][1] real_terms = term_db.getAllEntries() real_search = search_db.getAllEntries() if i==1: wanted_prefs = [[1,my_peer_id,1,1,2]] wanted_terms = [[1,u'linux'], [2,u'ubuntu']] wanted_search = [[1,my_peer_id,'?',1,0], [2,my_peer_id,'?',2,1]] elif i==2: # Arno, 2010-02-04: Nicolas assumed the collected torrent for i=1 # wouldn't be stored in DB? 
wanted_prefs = [[1,my_peer_id,'?',1,2],[2,my_peer_id,torrent_id,2,2]] wanted_terms = [[1,u'linux'], [2,u'ubuntu']] wanted_search = [[1,my_peer_id,'?',1,0], [2,my_peer_id,'?',2,1], [3,my_peer_id,'?',1,0], [4,my_peer_id,'?',2,1]] elif i==3: wanted_prefs = [[1,my_peer_id,'?',1,2],[2,my_peer_id,'?',2,2],[3,my_peer_id,torrent_id,5,2]] wanted_terms = [[1,u'linux'], [2,u'ubuntu'], [3, u'redhat']] wanted_search = [[1,my_peer_id,'?',1,0], [2,my_peer_id,'?',2,1], [3,my_peer_id,'?',1,0], [4,my_peer_id,'?',2,1], [5,my_peer_id,'?',1,0], [6,my_peer_id,'?',3,1]] print >> sys.stderr, "real_prefs: %s" % real_prefs print >> sys.stderr, "real_terms: %s" % real_terms print >> sys.stderr, "real_search: %s " % real_search print >> sys.stderr, "wanted_prefs: %s" % wanted_prefs print >> sys.stderr, "wanted_terms: %s" % wanted_terms print >> sys.stderr, "wanted_search: %s " % wanted_search self.assert_(self.lol_equals(real_search, wanted_search, "good buddycast %d: search" % i)) self.assert_(self.lol_equals(real_terms, wanted_terms, "good buddycast %d: terms" % i)) self.assert_(self.lol_equals(real_prefs, wanted_prefs, "good buddycast %d: prefs" % i)) def subtest_terms(self,myoversion): """assumes clicklog message 1 and 2 have been sent and digested""" print >>sys.stderr,"\ntest: subtest_terms" term_db = self.session.open_dbhandler(NTFY_TERM) s = OLConnection(self.my_keypair,'localhost',self.hisport,myoversion=myoversion) msg = self.get_good_clicklog_msg(3,myoversion) msg = self.create_payload(msg) s.send(msg) resp = s.recv() self.assert_(len(resp) > 0) termid = term_db.getTermID(u"linux") print >>sys.stderr, "TermID for Linux: %s" % termid #self.assert_(termid == 1) #self.assert_(term_db.getTerm(1)==bin2str(str(u"linux"))) completedTerms = term_db.getTermsStartingWith("li") print >> sys.stderr, "terms starting with l: %s" % completedTerms self.assert_(len(completedTerms)==1) self.assert_(u'linux' in completedTerms) term_db.insertTerm("asd#") completedTerms = 
term_db.getTermsStartingWith("asd") print >> sys.stderr, "terms starting with asd: %s" % completedTerms self.assert_(len(completedTerms)==1) # Arno, 2010-02-03: Nicolas had 'asd' here, but I don't see any place # where the # should have been stripped. # self.assert_(u'asd#' in completedTerms) def subtest_create_mypref(self): print >>sys.stderr,"\ntest: creating test MyPreference data" torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) torrent_db.addInfohash('mhashmhashmhashmhash') torrent_id = torrent_db.getTorrentID('mhashmhashmhashmhash') mypref_db = self.session.open_dbhandler(NTFY_MYPREFERENCES) search_db = self.session.open_dbhandler(NTFY_SEARCH) mypref_db.addMyPreference('mhashmhashmhashmhash', {'destination_path':''}, commit=True) clicklog_data = { 'click_position': 1, 'reranking_strategy': 2, 'keywords': ['linux', 'fedora'] } mypref_db.addClicklogToMyPreference('mhashmhashmhashmhash', clicklog_data, commit=True) # self.getAll("torrent_id, click_position, reranking_strategy", order_by="torrent_id") allEntries = mypref_db.getAllEntries() print >> sys.stderr, "all mypref entries: %s" % allEntries self.assert_(len(allEntries)==1) # (torrent_id, click_pos, rerank_strategy) mypref_wanted = [['?',1,2]] self.assert_(self.lol_equals(allEntries, mypref_wanted, "create mypref all")) # self.getAll("rowid, peer_id, torrent_id, term_id, term_order ", order_by="rowid") real_search = search_db.getAllOwnEntries() wanted_search = [[7,0,torrent_id,1,0], [8,0,torrent_id,5,1]] # is now 5 for some reason self.assert_(self.lol_equals(real_search, wanted_search, "create mypref allown")) def subtest_create_bc(self,myoversion): print >>sys.stderr,"\ntest: creating test create_bc" torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) torrent_db._db.update("Torrent", status_id=1) pref_db = self.session.open_dbhandler(NTFY_MYPREFERENCES) pref_db.loadData() msg = self.buddycast.buddycast_core.createBuddyCastMessage(0, myoversion, target_ip="127.0.0.1", target_port=80) print >> 
sys.stderr, "created bc pref: %s" % msg wantpref = ['mhashmhashmhashmhash',['linux','fedora'],1,2] if myoversion >= OLPROTO_VER_ELEVENTH: wantpref += [-1,-1,-1,-1] wantprefs = [wantpref] self.assert_(msg['preferences']==wantprefs) def lol_equals(self, lol1, lol2, msg): ok = True for (l1, l2) in zip(lol1, lol2): for (e1, e2) in zip(l1, l2): if e1=='?' or e2=='?': continue if not e1==e2: print >> sys.stderr, "%s != %s!" % (e1, e2) ok = False break if not ok: print >> sys.stderr, "%s: lol != lol:\nreal %s\nwanted %s" % (msg, lol1, lol2) return ok def create_payload(self,r): return BUDDYCAST+bencode(r) def test_suite(): suite = unittest.TestSuite() # We should run the tests in a separate Python interpreter to prevent # problems with our singleton classes, e.g. PeerDB, etc. if len(sys.argv) != 2: print "Usage: python test_buddycast_msg8plus.py <method name>" else: suite.addTest(TestBuddyCastMsg8Plus(sys.argv[1])) return suite def main(): unittest.main(defaultTest='test_suite',argv=[sys.argv[0]]) if __name__ == "__main__": main() ```
[ { "content": "Repeat the code precisely:\n```python\n#!/bin/python\n#\n# Hacking together tutorials from deeplearning.net to make\n# a MLP model\n\nimport numpy\nimport theano\nimport theano.tensor as T\nimport sklearn.cross_validation\n\nclass LogisticRegression(object):\n \"\"\"Multi-class Logistic Regress...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n#!/bin/python\n#\n# Hacking together tutorials from deeplearning.net to make\n# a MLP model\n\nimport numpy\nimport theano\nimport theano.tensor as T\nimport sklearn.cross_validation\n\nclass LogisticRegression(object):\n \"\"\"Multi-class ...
```python #!/bin/python # # Hacking together tutorials from deeplearning.net to make # a MLP model import numpy import theano import theano.tensor as T import sklearn.cross_validation class LogisticRegression(object): """Multi-class Logistic Regression Class The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`. Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to determine a class membership probability. """ def __init__(self, input, n_in, n_out): """ Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie """ # start-snippet-1 # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True ) # initialize the baises b as a vector of n_out 0s self.b = theano.shared( value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True ) # symbolic expression for computing the matrix of class-membership # probabilities # Where: # W is a matrix where column-k represent the separation hyper plain for # class-k # x is a matrix where row-j represents input training sample-j # b is a vector where element-k represent the free parameter of hyper # plain-k self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b) # symbolic description of how to compute prediction as class whose # probability is maximal self.y_pred = T.argmax(self.p_y_given_x, axis=1) # end-snippet-1 # parameters of the model self.params = [self.W, self.b] def negative_log_likelihood(self, y): """Return the mean of the negative log-likelihood 
of the prediction of this model under a given target distribution. .. math:: \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) = \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\ \ell (\theta=\{W,b\}, \mathcal{D}) :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size """ # start-snippet-2 # y.shape[0] is (symbolically) the number of rows in y, i.e., # number of examples (call it n) in the minibatch # T.arange(y.shape[0]) is a symbolic vector which will contain # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of # Log-Probabilities (call it LP) with one row per example and # one column per class LP[T.arange(y.shape[0]),y] is a vector # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is # the mean (across minibatch examples) of the elements in v, # i.e., the mean log-likelihood across the minibatch. 
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) # end-snippet-2 def errors(self, y): """Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label """ # check if y has same dimension of y_pred if y.ndim != self.y_pred.ndim: raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) ) # check if y is of the correct datatype if y.dtype.startswith('int'): # the T.neq operator returns a vector of 0s and 1s, where 1 # represents a mistake in prediction return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError() class HiddenLayer(object): def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh): """ Typical hidden layer of a MLP: units are fully-connected and have sigmoidal activation function. Weight matrix W is of shape (n_in,n_out) and the bias vector b is of shape (n_out,). 
NOTE : The nonlinearity used here is tanh Hidden unit activation is given by: tanh(dot(input,W) + b) :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.dmatrix :param input: a symbolic tensor of shape (n_examples, n_in) :type n_in: int :param n_in: dimensionality of input :type n_out: int :param n_out: number of hidden units :type activation: theano.Op or function :param activation: Non linearity to be applied in the hidden layer """ self.input = input # end-snippet-1 # `W` is initialized with `W_values` which is uniformely sampled # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden)) # for tanh activation function # the output of uniform if converted using asarray to dtype # theano.config.floatX so that the code is runable on GPU # Note : optimal initialization of weights is dependent on the # activation function used (among other things). # For example, results presented in [Xavier10] suggest that you # should use 4 times larger initial weights for sigmoid # compared to tanh # We have no info for other function, so we use the same as # tanh. if W is None: W_values = numpy.asarray( rng.uniform( low=-numpy.sqrt(6. / (n_in + n_out)), high=numpy.sqrt(6. / (n_in + n_out)), size=(n_in, n_out) ), dtype=theano.config.floatX ) if activation == theano.tensor.nnet.sigmoid: W_values *= 4 W = theano.shared(value=W_values, name='W', borrow=True) if b is None: b_values = numpy.zeros((n_out,), dtype=theano.config.floatX) b = theano.shared(value=b_values, name='b', borrow=True) self.W = W self.b = b lin_output = T.dot(input, self.W) + self.b self.output = ( lin_output if activation is None else activation(lin_output) ) # parameters of the model self.params = [self.W, self.b] # start-snippet-2 class MLP(object): """Multi-Layer Perceptron Class A multilayer perceptron is a feedforward artificial neural network model that has one layer or more of hidden units and nonlinear activations. 
Intermediate layers usually have as activation function tanh or the sigmoid function (defined here by a ``HiddenLayer`` class) while the top layer is a softamx layer (defined here by a ``LogisticRegression`` class). """ def __init__(self, rng, input, n_in, n_hidden, n_out): """Initialize the parameters for the multilayer perceptron :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_hidden: int :param n_hidden: number of hidden units :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie """ # Since we are dealing with a one hidden layer MLP, this will translate # into a HiddenLayer with a tanh activation function connected to the # LogisticRegression layer; the activation function can be replaced by # sigmoid or any other nonlinear function self.hiddenLayer = HiddenLayer( rng=rng, input=input, n_in=n_in, n_out=n_hidden, activation=T.tanh ) # The logistic regression layer gets as input the hidden units # of the hidden layer self.logRegressionLayer = LogisticRegression( input=self.hiddenLayer.output, n_in=n_hidden, n_out=n_out ) # end-snippet-2 start-snippet-3 # L1 norm ; one regularization option is to enforce L1 norm to # be small self.L1 = ( abs(self.hiddenLayer.W).sum() + abs(self.logRegressionLayer.W).sum() ) # square of L2 norm ; one regularization option is to enforce # square of L2 norm to be small self.L2_sqr = ( (self.hiddenLayer.W ** 2).sum() + (self.logRegressionLayer.W ** 2).sum() ) # negative log likelihood of the MLP is given by the negative # log likelihood of the output of the model, computed in the # logistic regression layer self.negative_log_likelihood = ( self.logRegressionLayer.negative_log_likelihood ) # 
same holds for the function computing the number of errors self.errors = self.logRegressionLayer.errors # the parameters of the model are the parameters of the two layer it is # made out of self.params = self.hiddenLayer.params + self.logRegressionLayer.params # end-snippet-3 class MLP_sk_interface(object): def __init__(self, learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000, batch_size=20, n_hidden=500, verbose=False): # store the many initialisation parameters self.learning_rate=learning_rate self.L1_reg=L1_reg self.L2_reg=L2_reg self.n_epochs=n_epochs self.batch_size=batch_size self.n_hidden=n_hidden self.verbose=verbose def fit(self,X,y,random_state=1234): """ Trying to make this class conform to sklearn API conventions. input: * X - feature vectors, in an array, each row is a data point * y - target vector of labels """ # get dimensionality of input d_in = int(numpy.sqrt(X.shape[1])) d_out = len(set(y)) # Shuffle X and y X,y = sklearn.utils.shuffle(X,y,random_state=random_state) split = int(X.shape[0]/3) # take stratified splits for test, train and validation train_set_x,train_set_y = self.shared_dataset(X[:split],y[:split]) valid_set_x,valid_set_y = self.shared_dataset(X[split:2*split], y[split:2*split]) test_set_x,test_set_y = self.shared_dataset(X[2*split:],y[2*split:]) # compute number of minibatches for training, validation and testing n_train_batches = train_set_x.get_value(borrow=True).shape[0]/self.batch_size n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]/self.batch_size n_test_batches = test_set_x.get_value(borrow=True).shape[0]/self.batch_size ########################################## # initialise the parameters of the model # ########################################## # allocate symbolic variables for the data index = T.lscalar() # index to a [mini]batch x = T.matrix('x') # the data is presented as rasterized images y = T.ivector('y') # the labels are presented as 1D vector of # [int] labels rng = 
numpy.random.RandomState(random_state) # construct the MLP class classifier = MLP(rng=rng, input=x, n_in=d_in * d_in, n_hidden=self.n_hidden, n_out=d_out) # start-snippet-4 # the cost we minimize during training is the negative log likelihood of # the model plus the regularization terms (L1 and L2); cost is expressed # here symbolically cost = (classifier.negative_log_likelihood(y) + self.L1_reg * classifier.L1 + self.L2_reg * classifier.L2_sqr) # compiling a Theano function that computes the mistakes that are made # by the model on a minibatch test_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: test_set_x[index * self.batch_size:(index + 1) * self.batch_size], y: test_set_y[index * self.batch_size:(index + 1) * self.batch_size] }) # start-snippet-5 # compute the gradient of cost with respect to theta (sotred in params) # the resulting gradients will be stored in a list gparams gparams = [T.grad(cost, param) for param in classifier.params] # given two list the zip A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of # same length, zip generates a list C of same size, where each element # is a pair formed from the two lists : # C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)] updates = [ (param, param - self.learning_rate * gparam) for param, gparam in zip(classifier.params, gparams) ] # compiling a Theano function `train_model` that returns the cost, but # in the same time updates the parameter of the model based on the rules # defined in `updates` train_model = theano.function( inputs=[index], outputs=cost, updates=updates, givens={ x: train_set_x[index * self.batch_size: (index + 1) * self.batch_size], y: train_set_y[index * self.batch_size: (index + 1) * self.batch_size] } ) ############### # TRAIN MODEL # ############### if self.verbose: print('... 
training') # early-stopping parameters patience = 50000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is # found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(n_train_batches, patience / 2) # go through this many # minibatche before checking the network # on the validation set; in this case we # check every epoch best_validation_loss = numpy.inf best_i = 0 test_score = 0. epoch = 0 done_looping = False while (epoch < self.n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in range(int(n_train_batches)): minibatch_avg_cost = train_model(minibatch_index) # iteration number i = (epoch - 1) * n_train_batches + minibatch_index if (i + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [validate_model(i) for i in range(int(n_valid_batches))] this_validation_loss = numpy.mean(validation_losses) if self.verbose: print('epoch %i, minibatch %i/%i, validation error %f %%' % ( epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100. )) # if we got the best validation score until now if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if ( this_validation_loss < best_validation_loss * improvement_threshold ): patience = max(patience, i * patience_increase) best_validation_loss = this_validation_loss best_i = i # test it on the test set test_losses = [test_model(i) for i in range(int(n_test_batches))] test_score = numpy.mean(test_losses) if self.verbose: print((' epoch %i, minibatch %i/%i, test error of ' 'best model %f %%') % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.)) if patience <= i: done_looping = True break if self.verbose: print(('Optimization complete. 
Best validation score of %f %% ' 'obtained at iation %i, with test performance %f %%') % (best_validation_loss * 100., best_i + 1, test_score * 100.)) def score(self,X,y): """ Takes the X and y and produces a score for the performance of the model. """ validate_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: valid_set_x[index * batch_size:(index + 1) * batch_size], y: valid_set_y[index * batch_size:(index + 1) * batch_size] }) def shared_dataset(self, data_x, data_y, borrow=True): """ Function that loads the dataset into shared variables The reason we store our dataset in shared variables is to allow Theano to copy it into the GPU memory (when code is run on GPU). Since copying data into the GPU is slow, copying a minibatch everytime is needed (the default behaviour if the data is not in a shared variable) would lead to a large decrease in performance. """ shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow) shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow) # When storing data on the GPU it has to be stored as floats # therefore we will store the labels as ``floatX`` as well # (``shared_y`` does exactly that). But during our computations # we need them as ints (we use labels as index, and if they are # floats it doesn't make sense) therefore instead of returning # ``shared_y`` we will have to cast it to int. This little hack # lets ous get around this issue return shared_x, T.cast(shared_y, 'int32') ```
[ { "content": "Output the full code verbatim (no extra comments):\n```python\nfrom eden.model import _CrontabPattern, _ParsedSpec, Task, User\nfrom datetime import datetime, timedelta\nimport unittest\n\nclass TaskTest(unittest.TestCase):\n\n def test_new(self):\n task = Task(1, 'task_id', 'name', 'job...
[ { "content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nfrom eden.model import _CrontabPattern, _ParsedSpec, Task, User\nfrom datetime import datetime, timedelta\nimport unittest\n\nclass TaskTest(unittest.TestCase):\n\n def test_new(self):\n task = Task(1, 'task_i...
```python from eden.model import _CrontabPattern, _ParsedSpec, Task, User from datetime import datetime, timedelta import unittest class TaskTest(unittest.TestCase): def test_new(self): task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'every 5') self.assertEquals((task.name, task.event), ('name', 'every 5')) self.assertEquals(task.pattern, ['every', '5']) self.assertRaises(TypeError, lambda: task(1, 'task_id', 'name', 'every')) self.assertRaises(TypeError, lambda: task(1, 'task_id', 'name', 'every x')) self.assertRaises(TypeError, lambda: task(1, 'task_id', 'name', 'at xx')) def test_gen_next_run(self): task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'every 5', datetime.strptime("8/8/2014 16:35", "%d/%m/%Y %H:%M"), datetime.strptime("8/8/2014 16:30", "%d/%m/%Y %H:%M")) self.assertEqual(task.next_run, datetime.strptime("8/8/2014 16:35", "%d/%m/%Y %H:%M")) self.assertTrue(task.gen_next_run() > datetime.strptime("8/8/2014 16:35", "%d/%m/%Y %H:%M")) def test_is_running(self): task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'every 5', datetime.strptime("8/8/2014 16:35", "%d/%m/%Y %H:%M"), datetime.strptime("8/8/2014 16:30", "%d/%m/%Y %H:%M")) self.assertEqual(task.is_running(), False) def test_fresh(self): task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'every 5') res = task.fresh() assert res == True assert task.task_id is None assert task.status == Task.SCHEDULED assert task.run_times == 1 task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'every 5') task.event = 'at 20141111 1212' res = task.fresh() assert res == False def test_retry(self): task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'every 5') res = task.retry() assert res == True assert task.attempts == 1 assert task.status == Task.RETRY task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'every 5') task.attempts = 4 res = task.retry() assert res == False assert task.status == 
Task.ABORTED assert task.run_times == 1 def test_event_type(self): t = datetime.now() + timedelta(minutes=5) task = Task(1, 'task_id', 'name', 'job.test', {'args':(), 'kw':{}}, 'at ' + t.strftime('%Y%m%d%H%M')) self.assertEqual(task.event_type, 'at') task.event = 'every 5' self.assertEqual(task.event_type, 'every') class _CrontabPatternTest(unittest.TestCase): def setUp(self): self.pat = _CrontabPattern() def test_validate(self): self.assertTrue(self.pat.validate('7 0-23 1-32 1-12 0-7')) self.assertTrue(self.pat.validate('0-59/2 0-23 1-32 1-12 0-7')) self.assertFalse(self.pat.validate('0-23 1-32 1-12 0-7')) self.assertFalse(self.pat.validate('*7xx 0-23 1-32 1-12 0-7')) def test_parse(self): self.assertEqual(self.pat.parse('0-59/2 0-23 1-32 1-12 0-7'), _ParsedSpec(minute=set([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58]), hour=set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), dom=set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), month=set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), dow=set([0, 1, 2, 3, 4, 5, 6, 7]))) self.assertEqual(self.pat.parse('7 0-23 1-32 1-12 0-7'), _ParsedSpec(minute=set([7]), hour=set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), dom=set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), month=set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), dow=set([0, 1, 2, 3, 4, 5, 6, 7]))) def test_gen_next_run(self): gens = [datetime(2012, 4, 29), datetime(2012, 4, 29, 0, 0), datetime(2012, 4, 29, 0, 7), datetime(2012, 4, 29, 0, 14), datetime(2012, 4, 29, 0, 21), datetime(2012, 4, 29, 0, 28), datetime(2012, 4, 29, 0, 35), datetime(2012, 4, 29, 0, 42), datetime(2012, 4, 29, 0, 49), datetime(2012, 4, 29, 0, 56), datetime(2012, 4, 29, 1, 0) ] for i, gen in 
enumerate(gens[0:10]): self.assertEqual(self.pat.gen_next_run('*/7 * * * *', gen), gens[i]) class UserTest(unittest.TestCase): def setUp(self): self.new_user = User('username', 'email', 'real_name', 'password', 'status') self.db_user = User('username', 'email', 'real_name', 'd63dc919e201d7bc4c825630d2cf25fdc93d4b2f0d46706d29038d01', 'status', uid=1) def test_secure_passwod(self): self.assertEqual(self.new_user.secure_password('password'), 'd63dc919e201d7bc4c825630d2cf25fdc93d4b2f0d46706d29038d01') def test_check(self): self.assertTrue(self.new_user.check('password')) self.assertTrue(self.db_user.check('password')) if __name__ == '__main__': unittest.main() ```
[ { "content": "Replicate the source code:\n```python\nfrom rest_framework import serializers\nfrom django.contrib.auth.hashers import make_password\nfrom rest_framework.exceptions import ValidationError\nfrom django.contrib.auth import get_user_model\nfrom common.serializers import JobSerializer\nfrom job_auth.s...
[ { "content": "Replicate the source code:\n<|memory_start|>```python\nfrom rest_framework import serializers\nfrom django.contrib.auth.hashers import make_password\nfrom rest_framework.exceptions import ValidationError\nfrom django.contrib.auth import get_user_model\nfrom common.serializers import JobSerializer\...
```python from rest_framework import serializers from django.contrib.auth.hashers import make_password from rest_framework.exceptions import ValidationError from django.contrib.auth import get_user_model from common.serializers import JobSerializer from job_auth.serializers import UserSerializer from .models import( Employer, EmployerJobs ) class EmployerSerializer(serializers.ModelSerializer): user = UserSerializer(required=True) employer_location = serializers.CharField(read_only=True) def create(self, validated_data): try: user = get_user_model().objects.get( username=validated_data['user']['username']) except get_user_model().DoesNotExist: validated_data['user']['password'] = make_password( validated_data['user']['password']) user = get_user_model().objects.create(**validated_data['user']) validated_data['user'] = user return super(EmployerSerializer, self).create(validated_data) class Meta: model = Employer class EmployerJobsSerializer(serializers.ModelSerializer): job = JobSerializer(required=True) employer_details = serializers.SerializerMethodField() def create(self, validated_data): job = self.initial_data['job'] job_serializer = JobSerializer(data=job) if job_serializer.is_valid(): job_obj = job_serializer.save() validated_data['job'] = job_obj return super(EmployerJobsSerializer, self).create(validated_data) else: raise ValidationError(job_serializer.errors) def get_employer_details(self, obj): return EmployerSerializer(obj.employer).data class Meta: model = EmployerJobs ```
[ { "content": "Here is a code snippet:\n```python\n#!/usr/bin/env python\n# -*- coding: ascii -*-\n\nr\"\"\"\nSupport routines for exhaustive equation search in curve fit routines.\n\nUses itertools to find all combinations.\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import str\n\n\nfrom itert...
[ { "content": "Here is a code snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: ascii -*-\n\nr\"\"\"\nSupport routines for exhaustive equation search in curve fit routines.\n\nUses itertools to find all combinations.\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import str...
```python #!/usr/bin/env python # -*- coding: ascii -*- r""" Support routines for exhaustive equation search in curve fit routines. Uses itertools to find all combinations. """ from __future__ import print_function from builtins import str from itertools import combinations from math import factorial full_funcL = ['const','x','x**2','x**3','x**4','x**5','x**6','log(x)','exp(x)'] full_xtranL = ['x','1/x','log(x)','exp(x)','(1/log(x))','log(1/x)','(1/exp(x))','exp(1/x)'] full_ytranL = ['y','1/y','y**2','y**3','log(y)','exp(y)','1/log(y)','log(1/y)','1/exp(y)','exp(1/y)'] def next_term( func='x', xtranL=None ): k = func.rfind('x') if k==-1: yield func else: for xt in xtranL: # try to avoid double parens if '(x)'in func and xt[0]=='(' and xt[-1]==')': yield func[:k-1] + xt + func[k+2:] else: yield func[:k] + xt + func[k+1:] def build_xterms( funcL=None, xtranL=None ): termL = [] for f in funcL: for nt in next_term(func=f, xtranL=xtranL): if nt not in termL: termL.append( nt ) return termL # copied from itertools doc page. # http://docs.python.org/2/library/itertools.html#itertools.combinations def num_combinations(iterable, r): n = len(iterable) if r>n: return 0 return factorial(n) / factorial(r) / factorial(n-r) if __name__ == "__main__": for nt in next_term(func='x', xtranL=full_xtranL): print(nt) print() funcL=['const','x','x**2','exp(x)'] xtranL=['x','(1/x)','exp(x)'] print("termL for funcL=%s\n and xtranL=%s\n"%(str(funcL), str(xtranL))) termL = build_xterms( funcL=funcL, xtranL=xtranL ) print(termL) print() print('# 3 Term equations =',num_combinations(termL, 3)) for i,rhs in enumerate(combinations(termL, 3)): print('%3i)'%(i+1,),rhs) ```
[ { "content": "```python\nfrom django.conf.urls import url, patterns, include\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import cache_page\nfrom common.views import APIRoot, root_redirect_view\n\nfrom rest_auth.views import (\n Login, Logout, UserDetails, Pa...
[ { "content": "<|memory_start|>```python\nfrom django.conf.urls import url, patterns, include\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import cache_page\nfrom common.views import APIRoot, root_redirect_view\n\nfrom rest_auth.views import (\n Login, Logout,...
```python from django.conf.urls import url, patterns, include from django.contrib.auth.decorators import login_required from django.views.decorators.cache import cache_page from common.views import APIRoot, root_redirect_view from rest_auth.views import ( Login, Logout, UserDetails, PasswordChange, PasswordReset, PasswordResetConfirm ) rest_auth_patterns = patterns( # re-written from rest_auth.urls because of cache validation '', # URLs that do not require a session or valid token url(r'^password/reset/$', cache_page(0)(PasswordReset.as_view()), name='rest_password_reset'), url(r'^password/reset/confirm/$', cache_page(0)(PasswordResetConfirm.as_view()), name='rest_password_reset_confirm'), url(r'^login/$', cache_page(0)(Login.as_view()), name='rest_login'), # URLs that require a user to be logged in with a valid session / token. url(r'^logout/$', cache_page(0)(Logout.as_view()), name='rest_logout'), url(r'^user/$', cache_page(0)(UserDetails.as_view()), name='rest_user_details'), url(r'^password/change/$', cache_page(0)(PasswordChange.as_view()), name='rest_password_change'), ) apipatterns = patterns( '', url(r'^$', login_required( cache_page(60*60)(APIRoot.as_view())), name='root_listing'), url(r'^explore/', include('rest_framework_swagger.urls', namespace='swagger')), url(r'^common/', include('common.urls', namespace='common')), url(r'^users/', include('users.urls', namespace='users')), url(r'^facilities/', include('facilities.urls', namespace='facilities')), url(r'^chul/', include('chul.urls', namespace='chul')), url(r'^gis/', include('mfl_gis.urls', namespace='mfl_gis')), url(r'^reporting/', include('reporting.urls', namespace='reporting')), url(r'^rest-auth/', include(rest_auth_patterns, namespace='rest_auth')), url(r'^rest-auth/registration/', include('rest_auth.registration.urls', namespace='rest_auth_registration')) ) urlpatterns = patterns( '', url(r'^$', root_redirect_view, name='root_redirect'), url(r'^api/', include(apipatterns, namespace='api')), 
url(r'^accounts/', include('rest_framework.urls', namespace='rest_framework')), url(r'^api/token/', 'rest_framework.authtoken.views.obtain_auth_token'), url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')), ) ```
[ { "content": "Recreate the entire code block with identical formatting:\n```python\n# -*- coding: utf-8 -*-\nimport re\nfrom traceback import format_exc\n\nimport pytest\nfrom wrapanapi.utils import eval_strings\n\nfrom cfme.containers.provider import ContainersProvider\nfrom cfme.utils.appliance.implementation...
[ { "content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport re\nfrom traceback import format_exc\n\nimport pytest\nfrom wrapanapi.utils import eval_strings\n\nfrom cfme.containers.provider import ContainersProvider\nfrom cfme.utils.applianc...
```python # -*- coding: utf-8 -*- import re from traceback import format_exc import pytest from wrapanapi.utils import eval_strings from cfme.containers.provider import ContainersProvider from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.wait import TimedOutError pytestmark = [ pytest.mark.usefixtures('setup_provider'), pytest.mark.meta( server_roles='+ems_metrics_coordinator +ems_metrics_collector +ems_metrics_processor'), pytest.mark.tier(1), pytest.mark.long_running_env, pytest.mark.provider([ContainersProvider], scope='function') ] @pytest.fixture(scope='module') def node_hardwares_db_data(appliance): """Grabbing hardwares table data for nodes""" db = appliance.db.client hardwares_table = db['hardwares'] container_nodes = db['container_nodes'] out = {} for node in db.session.query(container_nodes).all(): out[node.name] = hardwares_table.__table__.select().where( hardwares_table.id == node.id ).execute().fetchone() return out def get_vpor_data_by_name(vporizer_, name): return [vals for vals in vporizer_ if vals.resource_name == name] def get_report(appliance, menu_name, candu=False): """Queue a report by menu name , wait for finish and return it""" try: saved_report = appliance.collections.reports.instantiate( type='Configuration Management', subtype='Containers', menu_name=menu_name, is_candu=candu ).queue(wait_for_finish=True) except TimedOutError: pytest.skip('Could not find report "{}" in containers.\nTraceback:\n{}' .format(menu_name, format_exc())) return saved_report def test_container_reports_base_on_options(soft_assert, appliance): """This test verifies that all containers options are available in the report 'based on' Dropdown in the report creation Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ view = navigate_to(appliance.collections.reports, 'Add') for base_on in ( 'Chargeback for Images', 'Container Images', 'Container Services', 'Container Templates', 
'Containers', re.compile(r'Performance - Container\s*Nodes'), re.compile(r'Performance - Container\s*Projects'), 'Performance - Containers' ): compare = (base_on.match if hasattr(base_on, 'match') else base_on.__eq__) option = [opt for opt in view.base_report_on.all_options if compare(str(opt.text))] soft_assert(option, 'Could not find option "{}" for base report on.'.format(base_on)) def test_report_pods_per_ready_status(appliance, soft_assert, provider): """Testing 'Pods per Ready Status' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ pods_per_ready_status = provider.pods_per_ready_status() report = get_report(appliance, 'Pods per Ready Status') for row in report.data.rows: name = row['# Pods per Ready Status'] readiness_ui = bool(eval_strings([row['Ready Condition Status']]).pop()) if soft_assert(name in pods_per_ready_status, # this check based on BZ#1435958 'Could not find pod "{}" in openshift.' 
.format(name)): expected_readiness = bool(all(pod for pod in pods_per_ready_status.get(name, False))) soft_assert(expected_readiness == readiness_ui, 'For pod "{}" expected readiness is "{}" Found "{}"' .format(name, expected_readiness, readiness_ui)) def test_report_nodes_by_capacity(appliance, soft_assert, node_hardwares_db_data): """Testing 'Nodes By Capacity' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Nodes By Capacity') for row in report.data.rows: hw = node_hardwares_db_data[row['Name']] soft_assert(hw.cpu_total_cores == int(row['CPU Cores']), 'Number of CPU cores is wrong: expected {}' ' got {}'.format(hw.cpu_total_cores, row['CPU Cores'])) # The following block is to convert whatever we have to MB memory_ui = float(re.sub(r'[a-zA-Z,]', '', row['Memory'])) if 'gb' in row['Memory'].lower(): memory_mb_ui = memory_ui * 1024 # Shift hw.memory_mb to GB, round to the number of decimals of memory_mb_db # and shift back to MB: memory_mb_db = round(hw.memory_mb / 1024.0, len(str(memory_mb_ui).split('.')[1])) * 1024 else: # Assume it's MB memory_mb_ui = memory_ui memory_mb_db = hw.memory_mb soft_assert(memory_mb_ui == memory_mb_db, 'Memory (MB) is wrong for node "{}": expected {} got {}' .format(row['Name'], memory_mb_ui, memory_mb_db)) def test_report_nodes_by_cpu_usage(appliance, soft_assert, vporizer): """Testing 'Nodes By CPU Usage' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Nodes By CPU Usage') for row in report.data.rows: vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0] usage_db = round(vpor_values.max_cpu_usage_rate_average, 2) usage_report = round(float(row['CPU Usage (%)']), 2) soft_assert(usage_db == usage_report, 'CPU usage is wrong for node "{}": expected {} got {}' 
.format(row['Name'], usage_db, usage_report)) def test_report_nodes_by_memory_usage(appliance, soft_assert, vporizer): """Testing 'Nodes By Memory Usage' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Nodes By Memory Usage') for row in report.data.rows: vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0] usage_db = round(vpor_values.max_mem_usage_absolute_average, 2) usage_report = round(float(row['Memory Usage (%)']), 2) soft_assert(usage_db == usage_report, 'CPU usage is wrong for node "{}": expected {} got {}.' .format(row['Name'], usage_db, usage_report)) def test_report_number_of_nodes_per_cpu_cores(appliance, soft_assert, node_hardwares_db_data): """Testing 'Number of Nodes per CPU Cores' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Nodes by Number of CPU Cores') for row in report.data.rows: hw = node_hardwares_db_data[row['Name']] soft_assert(hw.cpu_total_cores == int(row['Hardware Number of CPU Cores']), 'Hardware Number of CPU Cores is wrong for node "{}": expected {} got {}.' 
.format(row['Name'], hw.cpu_total_cores, row['Hardware Number of CPU Cores'])) def test_report_projects_by_number_of_pods(appliance, soft_assert): """Testing 'Projects by Number of Pods' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ container_projects = appliance.db.client['container_projects'] container_pods = appliance.db.client['container_groups'] report = get_report(appliance, 'Projects by Number of Pods') for row in report.data.rows: pods_count = len(container_pods.__table__.select().where( container_pods.container_project_id == container_projects.__table__.select().where( container_projects.name == row['Project Name']).execute().fetchone().id ).execute().fetchall()) soft_assert(pods_count == int(row['Number of Pods']), 'Number of pods is wrong for project "{}". expected {} got {}.' .format(row['Project Name'], pods_count, row['Number of Pods'])) def test_report_projects_by_cpu_usage(appliance, soft_assert, vporizer): """Testing 'Projects By CPU Usage' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Projects By CPU Usage') for row in report.data.rows: vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0] usage_db = round(vpor_values.max_cpu_usage_rate_average, 2) usage_report = round(float(row['CPU Usage (%)']), 2) soft_assert(usage_db == usage_report, 'CPU usage is wrong for project "{}": expected {} got {}' .format(row['Name'], usage_db, usage_report)) def test_report_projects_by_memory_usage(appliance, soft_assert, vporizer): """Testing 'Projects By Memory Usage' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Projects By Memory Usage') for row in report.data.rows: vpor_values = 
get_vpor_data_by_name(vporizer, row["Name"])[0] usage_db = round(vpor_values.max_mem_usage_absolute_average, 2) usage_report = round(float(row['Memory Usage (%)']), 2) soft_assert(usage_db == usage_report, 'CPU usage is wrong for project "{}": expected {} got {}.' .format(row['Name'], usage_db, usage_report)) def test_report_pod_counts_for_container_images_by_project(appliance, provider, soft_assert): """Testing 'Pod counts For Container Images by Project' report,\ see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Pod counts For Container Images by Project', candu=True) pods_api = provider.mgmt.list_pods() pods_per_project = {} for project in provider.mgmt.list_project_names(): pods_per_project[project] = [ pd for pd in pods_api if pd.metadata.namespace == project] rows = list(report.data.rows) for row in rows: project_name, pod_name = row['Project Name'], row['Pod Name'] pod = filter(lambda pd: pd.metadata.name == pod_name, pods_per_project[project_name]) soft_assert(pod, 'Could not find pod "{}" of project "{}" in the report.' .format(pod_name, project_name)) pod = pod.pop() for pd in pods_per_project[project_name]: expected_image = pd.spec.containers[0].image pod_images = [r['Image Name'] for r in rows if r['Pod Name'] == pod_name] # Use 'in' since the image name in the API may include also registry and tag soft_assert(filter(lambda img_nm: img_nm in expected_image, pod_images), 'Could not find image "{}" in pod "{}". 
Pod images in report: {}' .format(expected_image, pod_name, pod_images)) def test_report_recently_discovered_pods(appliance, provider, soft_assert): """Testing 'Recently Discovered Pods' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Recently Discovered Pods') pods_in_report = [row['Name'] for row in report.data.rows] pods_per_ready_status = provider.pods_per_ready_status() for pod in pods_per_ready_status.keys(): soft_assert(pod in pods_in_report, 'Could not find pod "{}" in report.'.format(pod)) def test_report_number_of_images_per_node(appliance, provider, soft_assert): """Testing 'Number of Images per Node' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ pods_api = provider.mgmt.list_pods() report = get_report(appliance, 'Number of Images per Node', candu=True) report_data = list(report.data.rows) for pod in pods_api: expected_image = pod.spec.containers[0].image node = pod.spec.node_name pod_name = pod.metadata.name pod_images = [row['Image Name'] for row in report_data if row['Pod Name'] == pod_name and row['Node Name'] == node] # Use 'in' since the image name in the API may include also registry and tag is_image = filter(lambda img_nm: img_nm in expected_image, pod_images) soft_assert(is_image, 'Expected image for pod "{0}" in node {1} is "{2}". 
found images: {3}' .format(pod_name, node, expected_image, pod_images)) def test_report_projects_by_number_of_containers(appliance, provider, soft_assert): """Testing 'Projects by Number of Containers' report, see polarion case for more info Polarion: assignee: juwatts caseimportance: medium casecomponent: Containers initialEstimate: 1/6h """ report = get_report(appliance, 'Projects by Number of Containers') pods_api = provider.mgmt.list_pods() # Since there is no provider column, in case of more than 1 provider we get some projects # multiple times in the report. Because of that for each project name we are collecting # all the 'Containers Count' columns and then checking that the containers count that we # fetched from the API is found _in_ the counts under this project name projects_containers_count = {} for row in report.data.rows: if row['Project Name'] not in projects_containers_count: projects_containers_count[row['Project Name']] = [] projects_containers_count[row['Project Name']].append(int(row['Containers Count'])) for project_name, containers_counts in projects_containers_count.items(): containers_counts_api = sum( [len(pod.spec.containers) for pod in pods_api if pod.metadata.namespace == project_name] ) soft_assert(containers_counts_api in containers_counts, 'Expected containers count for project {} should be {}. Found {} instead.' .format(project_name, containers_counts_api, containers_counts_api)) ```
[ { "content": "Here is the source code:\n```python\n# -*- coding: utf-8 -*-\n\n#+---------------------------------------------------------------------------+\n#| 01001110 01100101 01110100 01111010 01101111 01100010 |\n#| ...
[ { "content": "Here is the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n#+---------------------------------------------------------------------------+\n#| 01001110 01100101 01110100 01111010 01101111 01100010 |\n#| ...
```python # -*- coding: utf-8 -*- #+---------------------------------------------------------------------------+ #| 01001110 01100101 01110100 01111010 01101111 01100010 | #| | #| Netzob : Inferring communication protocols | #+---------------------------------------------------------------------------+ #| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry | #| This program is free software: you can redistribute it and/or modify | #| it under the terms of the GNU General Public License as published by | #| the Free Software Foundation, either version 3 of the License, or | #| (at your option) any later version. | #| | #| This program is distributed in the hope that it will be useful, | #| but WITHOUT ANY WARRANTY; without even the implied warranty of | #| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | #| GNU General Public License for more details. | #| | #| You should have received a copy of the GNU General Public License | #| along with this program. If not, see <http://www.gnu.org/licenses/>. 
| #+---------------------------------------------------------------------------+ #| @url : http://www.netzob.org | #| @contact : contact@netzob.org | #| @sponsors : Amossys, http://www.amossys.fr | #| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ | #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Standard library imports #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Related third party imports #+---------------------------------------------------------------------------+ #+---------------------------------------------------------------------------+ #| Local application imports #+---------------------------------------------------------------------------+ from netzob.Common.Plugins.Extensions.GlobalMenuExtension import GlobalMenuExtension class CapturerMenuExtension(GlobalMenuExtension): def __init__(self, netzob, controller, actionName, menuText, menuStock=None, menuAccel=None, menuTooltip=None): super(GlobalMenuExtension, self).__init__() self.netzob = netzob self.actionName = actionName self.menuText = menuText self.menuStock = menuStock self.menuAccel = menuAccel self.menuTooltip = menuTooltip self.controller = controller def getUIDefinition(self): uiDefinition = """ <ui> <menubar name='MenuBar'> <menu action='Project'> <menu action='CaptureMessages'> <menuitem action='{0}' /> </menu> </menu> </menubar> </ui> """.format(self.actionName) return uiDefinition def getActions(self): actions = [ (self.actionName, self.menuStock, self.menuText, self.menuAccel, self.menuTooltip, self.executeAction)] return actions def executeAction(self, widget, vocabularyView, data=None): self.controller(vocabularyView) ```
[ { "content": "Return the code exactly, with no changes:\n```python\n#!/usr/bin/env python\n# encoding: utf-8\n\nimport asyncio\nimport logging\nfrom functools import singledispatch\nfrom bson.objectid import ObjectId\nfrom bson.errors import InvalidId\nfrom tbone.data.fields.mongo import DBRefField\nfrom tbone....
[ { "content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n#!/usr/bin/env python\n# encoding: utf-8\n\nimport asyncio\nimport logging\nfrom functools import singledispatch\nfrom bson.objectid import ObjectId\nfrom bson.errors import InvalidId\nfrom tbone.data.fields.mongo import DBRefFi...
```python #!/usr/bin/env python # encoding: utf-8 import asyncio import logging from functools import singledispatch from bson.objectid import ObjectId from bson.errors import InvalidId from tbone.data.fields.mongo import DBRefField from tbone.db.models import MongoCollectionMixin, post_save from tbone.dispatch.channels.mongo import MongoChannel from tbone.resources import ModelResource from tbone.resources.verbs import * from tbone.resources.signals import * LIMIT = 20 OFFSET = 0 logger = logging.getLogger(__file__) class MongoResource(ModelResource): ''' A specialized ``Resource`` subclass used for creating API endpoints coupled to a MongoDB collection. Provides basic CRUD operations using standard HTTP verbs. ''' class Meta: channel_class = MongoChannel @property def limit(self): return LIMIT @property def offset(self): return OFFSET @classmethod async def emit(cls, db, key, data): pubsub = cls._meta.channel_class(name='pubsub', db=db) await pubsub.publish(key, data) # ---------- receivers ------------ # @classmethod @receiver(post_save) async def post_save(cls, sender, db, instance, created): if instance.pk is None: return async def _emit(event_name): resource = cls() obj = await instance.serialize() if cls._meta.hypermedia is True: resource.add_hypermedia(obj) await resource.emit(db, event_name, obj) if created is True and 'created' in cls._meta.outgoing_detail: await _emit('resource_create') elif created is False and 'updated' in cls._meta.outgoing_detail: await _emit('resource_update') @classmethod @receiver(resource_post_list) async def post_list(cls, sender, db, instances): ''' Hook to capture the results of a list query. Useful when wanting to know when certain documents have come up in a query. 
Implement in resource subclasses to provide domain-specific behavior ''' serialized_objects = await asyncio.gather(*[obj.serialize() for obj in instances]) await cls.emit(db, 'resource_get_list', serialized_objects) # ------------- resource overrides ---------------- # async def list(self, *args, **kwargs): ''' Corresponds to GET request without a resource identifier, fetching documents from the database ''' limit = int(kwargs.pop('limit', self.limit)) limit = 1000 if limit == 0 else limit # lets not go crazy here offset = int(kwargs.pop('offset', self.offset)) projection = None # perform full text search or standard filtering if self._meta.fts_operator in kwargs.keys(): filters = { '$text': {'$search': kwargs[self._meta.fts_operator]} } projection = {'score': {'$meta': 'textScore'}} sort = [('score', {'$meta': 'textScore'}, )] else: # build filters from query parameters filters = self.build_filters(**kwargs) # add custom query defined in resource meta, if exists if isinstance(self._meta.query, dict): filters.update(self._meta.query) # build sorts from query parameters sort = self.build_sort(**kwargs) if isinstance(self._meta.sort, list): sort.extend(self._meta.sort) cursor = self._meta.object_class.get_cursor(db=self.db, query=filters, projection=projection, sort=sort) cursor.skip(offset) cursor.limit(limit) total_count = await self._meta.object_class.count(db=self.db, filters=filters) object_list = await self._meta.object_class.find(cursor) # serialize results serialized_objects = await asyncio.gather(*[obj.serialize() for obj in object_list]) # signal post list asyncio.ensure_future(resource_post_list.send( sender=self._meta.object_class, db=self.db, instances=object_list) ) return { 'meta': { 'total_count': total_count, 'limit': limit, 'offset': offset }, 'objects': serialized_objects } async def detail(self, **kwargs): ''' Corresponds to GET request with a resource unique identifier, fetching a single document from the database ''' try: pk = 
self.pk_type(kwargs.get('pk')) obj = await self._meta.object_class.find_one(self.db, {self.pk: pk}) if obj: return await obj.serialize() raise NotFound('Object matching the given {} with value {} was not found'.format(self.pk, str(pk))) except InvalidId: raise NotFound('Invalid ID') async def create(self, **kwargs): ''' Corresponds to POST request without a resource identifier, inserting a document into the database ''' try: # create model obj = self._meta.object_class() # deserialize data from request body self.data.update(kwargs) await obj.deserialize(self.data) # create document in DB await obj.insert(db=self.db) # serialize object for response return await obj.serialize() except Exception as ex: logger.exception(ex) raise BadRequest(ex) async def modify(self, **kwargs): ''' Corresponds to PATCH request with a resource identifier, modifying a single document in the database ''' try: pk = self.pk_type(kwargs['pk']) # modify is a class method on MongoCollectionMixin result = await self._meta.object_class.modify(self.db, key=pk, data=self.data) if result is None: raise NotFound('Object matching the given {} was not found'.format(self.pk)) return await result.serialize() except Exception as ex: logger.exception(ex) raise BadRequest(ex) async def update(self, **kwargs): ''' Corresponds to PUT request with a resource identifier, updating a single document in the database ''' try: self.data[self.pk] = self.pk_type(kwargs['pk']) updated_obj = await self._meta.object_class().update(self.db, data=self.data) if updated_obj is None: raise NotFound('Object matching the given {} was not found'.format(self.pk)) return await updated_obj.serialize() except Exception as ex: logger.exception(ex) raise BadRequest(ex) async def delete(self, *args, **kwargs): ''' Corresponds to DELETE request with a resource identifier, deleting a single document from the database ''' pk = self.pk_type(kwargs['pk']) result = await self._meta.object_class.delete_entries(db=self.db, query={self.pk: 
pk}) if result.acknowledged: if result.deleted_count == 0: raise NotFound() else: raise BadRequest('Failed to delete object') def build_filters(self, **kwargs): ''' Break url parameters and turn into filters ''' filters = {} for param, value in kwargs.items(): # break each url parameter to key + operator (if exists) pl = dict(enumerate(param.split('__'))) key = pl[0] operator = pl.get(1, None) if key in self._meta.object_class.fields(): field = self._meta.object_class._fields[key] if field.is_composite: # composite keys require additional handling # currently covering cases for dbref and list if isinstance(field, DBRefField): key, value = self.process_dbref_filter(key, value) elif isinstance(value, list) and operator == 'in': value = [convert_value(v) for v in value] else: value = convert_value(value) # assign operator, if applicable filters[key] = {'${}'.format(operator): value} if operator else value elif key == 'created': # special case where we map `created` key to mongo's _id which also contains a creation timestamp dt = parser.parse(convert_value(value)) dummy_id = ObjectId.from_datetime(dt) filters['_id'] = {'${}'.format(operator): dummy_id} if operator else dummy_id return filters def process_dbref_filter(self, key, value): k = '{}.$id'.format(key) if isinstance(value, MongoCollectionMixin): v = value._id else: v = ObjectId(value) return k, v def build_sort(self, **kwargs): ''' Break url parameters and turn into sort arguments ''' sort = [] order = kwargs.get('order_by', None) if order: if type(order) is list: order = order[0] if order[:1] == '-': sort.append((order[1:], -1)) else: sort.append((order, 1)) return sort @singledispatch def convert_value(value): ''' Utility functions to convert url params to mongodb filter operators and values ''' raise NotImplementedError('Cannot convert this {}'.format(type(value))) @convert_value.register(list) def _(value): return convert_value(value[0]) @convert_value.register(bytes) def _(value): return 
convert_value(value.decode('utf-8')) @convert_value.register(ObjectId) def _(value): return value @convert_value.register(str) def _(value): reserved = { '': None, 'null': None, 'none': None, 'true': True, 'false': False } if value in reserved: return reserved[value] # check if value is of type ObjectId if ObjectId.is_valid(value): return ObjectId(value) # check if value is numeric and return a filter which checks both strings and integers if value.isnumeric(): value = {'$in': [int(value), value]} # return as string return value ```
[ { "content": "```python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2014 Metaswitch Networks\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#...
[ { "content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2014 Metaswitch Networks\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the ...
```python # -*- coding: utf-8 -*- # Copyright (c) 2014 Metaswitch Networks # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ felix.fsocket ~~~~~~~~~~~~ Function for managing ZeroMQ sockets. """ import json import logging import time import zmq from calico.felix import futils log = logging.getLogger(__name__) class Socket(object): """ Socket is an encapsulation of a 0MQ socket wrapping the messaging logic. It handles connecting and signalling errors, and maintains state about the message flows. """ # Socket types TYPE_EP_REQ = "EP REQ" TYPE_EP_REP = "EP REP" TYPE_ACL_REQ = "ACL REQ" TYPE_ACL_SUB = "ACL SUB" ALL_TYPES = set((TYPE_EP_REQ, TYPE_EP_REP, TYPE_ACL_REQ, TYPE_ACL_SUB)) REQUEST_TYPES = set((TYPE_EP_REQ, TYPE_ACL_REQ)) ACL_TYPES = set((TYPE_ACL_REQ, TYPE_ACL_SUB)) EP_TYPES = set((TYPE_EP_REQ, TYPE_EP_REP)) PORT = {TYPE_EP_REQ: 9901, TYPE_EP_REP: 9902, TYPE_ACL_REQ: 9905, TYPE_ACL_SUB: 9906} ZTYPE = {TYPE_EP_REQ: zmq.REQ, TYPE_EP_REP: zmq.REP, TYPE_ACL_REQ: zmq.REQ, TYPE_ACL_SUB: zmq.SUB} def __init__(self, type, config): self.config = config self.type = type self.remote_addr = None self.port = Socket.PORT[type] self._zmq = None self.last_activity = None self.request_outstanding = False if type in Socket.EP_TYPES: self.remote_addr = self.config.PLUGIN_ADDR else: self.remote_addr = self.config.ACL_ADDR def close(self): """ Close this connection cleanly. 
""" if self._zmq is not None: self._zmq.close() self._zmq = None def communicate(self, hostname, context): """ Create and connect / bind a socket """ log.info( "Creating socket to entity %s:%d", self.remote_addr, self.port ) self._zmq = context.socket(Socket.ZTYPE[self.type]) if self.type == Socket.TYPE_EP_REP: self._zmq.bind("tcp://%s:%s" % (self.config.LOCAL_ADDR, self.port)) else: self._zmq.connect("tcp://%s:%s" % (self.remote_addr, self.port)) if self.type == Socket.TYPE_ACL_SUB: self._zmq.setsockopt(zmq.IDENTITY, hostname) self._zmq.setsockopt(zmq.SUBSCRIBE, 'aclheartbeat') # The socket connection event is always the time of last activity. self.last_activity = futils.time_ms() # We do not have a request outstanding. self.request_outstanding = False def send(self, msg): """ Send a specified message on a socket. """ log.info("Sent %s on socket %s" % (msg.descr, self.type)) self.last_activity = futils.time_ms() #*********************************************************************# #* We never expect any type of socket that we use to block since we *# #* use only REQ or REP sockets - so if we get blocking then we *# #* consider that something is wrong, and let the exception take down *# #* Felix. *# #*********************************************************************# try: self._zmq.send(msg.zmq_msg, zmq.NOBLOCK) if self.type in Socket.REQUEST_TYPES: self.request_outstanding = True except: log.exception("Socket %s blocked on send", self.type) raise def receive(self): """ Receive a message on this socket. For subscriptions, this will return a list of bytes. """ log.debug("Received something on %s", self.type) #*********************************************************************# #* We never expect any type of socket that we use to block since we *# #* just polled to check - so if we get blocking then we consider *# #* that something is wrong, and let the exception take down Felix. 
*# #*********************************************************************# try: if self.type != Socket.TYPE_ACL_SUB: data = self._zmq.recv(zmq.NOBLOCK) uuid = None else: uuid, data = self._zmq.recv_multipart(zmq.NOBLOCK) except: log.exception("Socket %s blocked on receive", self.type) raise message = Message.parse_message(data, uuid) # Log that we received the message. log.info("Received %s on socket %s" % (message.descr, self.type)) # If this is a response, we're no longer waiting for one. if self.type in Socket.REQUEST_TYPES: self.request_outstanding = False self.last_activity = futils.time_ms() # A special case: heartbeat messages on the subscription interface are # swallowed; the application code has no use for them. if (self.type == Socket.TYPE_ACL_SUB and message.type == Message.TYPE_HEARTBEAT): return None return message def timed_out(self): """ Returns True if the socket has been inactive for at least the timeout; all sockets must have heartbeats on them. """ return ((futils.time_ms() - self.last_activity) > self.config.CONN_TIMEOUT_MS) def keepalive_due(self): """ Returns True if we are due to send a keepalive on the socket. The caller is responsible for deciding which sockets need keepalives. """ return ((futils.time_ms() - self.last_activity) > self.config.CONN_KEEPALIVE_MS) class Message(object): """This represents a message either sent or received by Felix.""" TYPE_RESYNC = "RESYNCSTATE" TYPE_EP_CR = "ENDPOINTCREATED" TYPE_EP_UP = "ENDPOINTUPDATED" TYPE_EP_RM = "ENDPOINTDESTROYED" TYPE_GET_ACL = "GETACLSTATE" TYPE_ACL_UPD = "ACLUPDATE" TYPE_HEARTBEAT = "HEARTBEAT" def __init__(self, type, fields, endpoint_id=None): #: The type of the message. self.type = type #: The description of the message, used for logging only. 
if type == Message.TYPE_RESYNC and 'resync_id' in fields: self.descr = "%s(%s)" % (type, fields['resync_id']) elif endpoint_id is not None: self.descr = "%s(%s)" % (type, endpoint_id) elif 'endpoint_id' in fields: self.descr = "%s(%s)" % (type, fields['endpoint_id']) elif type in (Message.TYPE_EP_CR, Message.TYPE_EP_UP, Message.TYPE_EP_RM): self.descr = "%s response" % (type) else: self.descr = type #: A dictionary containing the other dynamic fields on the message. self.fields = fields # The endpoint ID for which this message is valid. Only used when # type is TYPE_ACL_UPD. self.endpoint_id = endpoint_id @property def zmq_msg(self): """ The serialized form of the message, suitable for sending on the wire. """ data = self.fields.copy() data['type'] = self.type return json.dumps(data) @classmethod def parse_message(cls, text, endpoint_id=None): """Parse a received message.""" data = json.loads(text) type = data.pop('type') msg = cls(type, data, endpoint_id) return msg ```
[ { "content": "Repeat the code exactly as the original, including blank lines:\n```python\n# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"\nForce-Directed Graph Layout\n=======================...
[ { "content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"\nForce-Directed Graph Layout\n=======...
```python # -*- coding: utf-8 -*- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. """ Force-Directed Graph Layout =========================== This module contains implementations for a force-directed layout, where the graph is modelled like a collection of springs or as a collection of particles attracting and repelling each other. The whole graph tries to reach a state which requires the minimum energy. """ import numpy as np try: from scipy.sparse import issparse except ImportError: def issparse(*args, **kwargs): return False from ..util import _straight_line_vertices, _rescale_layout class fruchterman_reingold(object): """ Fruchterman-Reingold implementation adapted from NetworkX. In the Fruchterman-Reingold algorithm, the whole graph is modelled as a collection of particles, it runs a simplified particle simulation to find a nice layout for the graph. Paramters --------- optimal : number Optimal distance between nodes. Defaults to :math:`1/\\sqrt{N}` where N is the number of nodes. iterations : int Number of iterations to perform for layout calculation. pos : array Initial positions of the nodes Notes ----- The algorithm is explained in more detail in the original paper [1]_. .. [1] Fruchterman, Thomas MJ, and Edward M. Reingold. "Graph drawing by force-directed placement." Softw., Pract. Exper. 21.11 (1991), 1129-1164. """ def __init__(self, optimal=None, iterations=50, pos=None): self.dim = 2 self.optimal = optimal self.iterations = iterations self.num_nodes = None self.pos = pos def __call__(self, adjacency_mat, directed=False): """ Starts the calculation of the graph layout. This is a generator, and after each iteration it yields the new positions for the nodes, together with the vertices for the edges and the arrows. There are two solvers here: one specially adapted for SciPy sparse matrices, and the other for larger networks. 
Parameters ---------- adjacency_mat : array The graph adjacency matrix. directed : bool Wether the graph is directed or not. If this is True, it will draw arrows for directed edges. Yields ------ layout : tuple For each iteration of the layout calculation it yields a tuple containing (node_vertices, line_vertices, arrow_vertices). These vertices can be passed to the `MarkersVisual` and `ArrowVisual`. """ if adjacency_mat.shape[0] != adjacency_mat.shape[1]: raise ValueError("Adjacency matrix should be square.") self.num_nodes = adjacency_mat.shape[0] if issparse(adjacency_mat): # Use the sparse solver solver = self._sparse_fruchterman_reingold else: solver = self._fruchterman_reingold for result in solver(adjacency_mat, directed): yield result def _fruchterman_reingold(self, adjacency_mat, directed=False): if self.optimal is None: self.optimal = 1 / np.sqrt(self.num_nodes) if self.pos is None: # Random initial positions pos = np.asarray( np.random.random((self.num_nodes, self.dim)), dtype=np.float32 ) else: pos = self.pos.astype(np.float32) # Yield initial positions line_vertices, arrows = _straight_line_vertices(adjacency_mat, pos, directed) yield pos, line_vertices, arrows # The initial "temperature" is about .1 of domain area (=1x1) # this is the largest step allowed in the dynamics. t = 0.1 # Simple cooling scheme. # Linearly step down by dt on each iteration so last iteration is # size dt. 
dt = t / float(self.iterations+1) # The inscrutable (but fast) version # This is still O(V^2) # Could use multilevel methods to speed this up significantly for iteration in range(self.iterations): delta_pos = _calculate_delta_pos(adjacency_mat, pos, t, self.optimal) pos += delta_pos _rescale_layout(pos) # cool temperature t -= dt # Calculate edge vertices and arrows line_vertices, arrows = _straight_line_vertices(adjacency_mat, pos, directed) yield pos, line_vertices, arrows def _sparse_fruchterman_reingold(self, adjacency_mat, directed=False): # Optimal distance between nodes if self.optimal is None: self.optimal = 1 / np.sqrt(self.num_nodes) # Change to list of list format # Also construct the matrix in COO format for easy edge construction adjacency_arr = adjacency_mat.toarray() adjacency_coo = adjacency_mat.tocoo() if self.pos is None: # Random initial positions pos = np.asarray( np.random.random((self.num_nodes, self.dim)), dtype=np.float32 ) else: pos = self.pos.astype(np.float32) # Yield initial positions line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos, directed) yield pos, line_vertices, arrows # The initial "temperature" is about .1 of domain area (=1x1) # This is the largest step allowed in the dynamics. t = 0.1 # Simple cooling scheme. # Linearly step down by dt on each iteration so last iteration is # size dt. 
dt = t / float(self.iterations+1) for iteration in range(self.iterations): delta_pos = _calculate_delta_pos(adjacency_arr, pos, t, self.optimal) pos += delta_pos _rescale_layout(pos) # Cool temperature t -= dt # Calculate line vertices line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos, directed) yield pos, line_vertices, arrows def _calculate_delta_pos(adjacency_arr, pos, t, optimal): """Helper to calculate the delta position""" # XXX eventually this should be refactored for the sparse case to only # do the necessary pairwise distances delta = pos[:, np.newaxis, :] - pos # Distance between points distance2 = (delta*delta).sum(axis=-1) # Enforce minimum distance of 0.01 distance2 = np.where(distance2 < 0.0001, 0.0001, distance2) distance = np.sqrt(distance2) # Displacement "force" displacement = np.zeros((len(delta), 2)) for ii in range(2): displacement[:, ii] = ( delta[:, :, ii] * ((optimal * optimal) / (distance*distance) - (adjacency_arr * distance) / optimal)).sum(axis=1) length = np.sqrt((displacement**2).sum(axis=1)) length = np.where(length < 0.01, 0.1, length) delta_pos = displacement * t / length[:, np.newaxis] return delta_pos ```
[ { "content": "Replicate the source code:\n```python\n\"\"\"\nDjango settings for financeiro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values...
[ { "content": "Replicate the source code:\n<|memory_start|>```python\n\"\"\"\nDjango settings for financeiro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings ...
```python """ Django settings for financeiro project. Generated by 'django-admin startproject' using Django 1.9.5. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '!lxp^uu&up613=mod5#6ect62-8(k66_fh2u*a$=y!7_yibsbi' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'financeiro.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'financeiro.wsgi.application' # Database # 
https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/' ```
[ { "content": "Here is the source code:\n```python\nfrom . import FixtureTest\n\n\nclass EarlyStep(FixtureTest):\n def test_steps_with_regional_route(self):\n self.load_fixtures([\n 'https://www.openstreetmap.org/way/24655593',\n 'https://www.openstreetmap.org/relation/2260059',\n...
[ { "content": "Here is the source code:\n<|memory_start|>```python\nfrom . import FixtureTest\n\n\nclass EarlyStep(FixtureTest):\n def test_steps_with_regional_route(self):\n self.load_fixtures([\n 'https://www.openstreetmap.org/way/24655593',\n 'https://www.openstreetmap.org/rela...
```python from . import FixtureTest class EarlyStep(FixtureTest): def test_steps_with_regional_route(self): self.load_fixtures([ 'https://www.openstreetmap.org/way/24655593', 'https://www.openstreetmap.org/relation/2260059', ], clip=self.tile_bbox(12, 653, 1582)) self.assert_has_feature( 12, 653, 1582, 'roads', {'kind_detail': 'steps'}) def test_steps_without_route(self): self.load_fixtures([ 'https://www.openstreetmap.org/way/38060491', ]) self.assert_has_feature( 13, 1309, 3166, 'roads', {'kind_detail': 'steps'}) def test_min_zoom(self): # way 25292070 highway=steps, no route, but has name (Esmeralda, # Bernal, SF) self.load_fixtures(['https://www.openstreetmap.org/way/25292070']) self.assert_no_matching_feature( 13, 1310, 3167, 'roads', {'kind': 'path', 'kind_detail': 'steps', 'name': 'Esmeralda Ave.'}) self.assert_has_feature( 14, 2620, 6334, 'roads', {'kind': 'path', 'kind_detail': 'steps', 'name': 'Esmeralda Ave.'}) ```
[ { "content": "```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n Union.py\n ---------------------\n Date : August 2012\n Copyright : (C) 2012 by Victor Olaya\n Email : volayaf at gma...
[ { "content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n Union.py\n ---------------------\n Date : August 2012\n Copyright : (C) 2012 by Victor Olaya\n Email ...
```python # -*- coding: utf-8 -*- """ *************************************************************************** Union.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.core import QgsFeatureRequest, QgsFeature, QgsGeometry, QgsWKBTypes from processing.core.GeoAlgorithm import GeoAlgorithm from processing.core.ProcessingLog import ProcessingLog from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException from processing.core.parameters import ParameterVector from processing.core.outputs import OutputVector from processing.tools import dataobjects, vector class Union(GeoAlgorithm): INPUT = 'INPUT' INPUT2 = 'INPUT2' OUTPUT = 'OUTPUT' def processAlgorithm(self, progress): vlayerA = dataobjects.getObjectFromUri(self.getParameterValue(Union.INPUT)) vlayerB = dataobjects.getObjectFromUri(self.getParameterValue(Union.INPUT2)) GEOS_EXCEPT = True FEATURE_EXCEPT = True vproviderA = vlayerA.dataProvider() fields = vector.combineVectorFields(vlayerA, vlayerB) names = [field.name() for field in fields] ProcessingLog.addToLog(ProcessingLog.LOG_INFO, unicode(names)) writer = self.getOutputFromName(Union.OUTPUT).getVectorWriter(fields, vproviderA.geometryType(), vproviderA.crs()) inFeatA = QgsFeature() inFeatB = QgsFeature() outFeat = QgsFeature() indexA = 
vector.spatialindex(vlayerB) indexB = vector.spatialindex(vlayerA) count = 0 nElement = 0 featuresA = vector.features(vlayerA) nFeat = len(featuresA) for inFeatA in featuresA: progress.setPercentage(nElement / float(nFeat) * 50) nElement += 1 lstIntersectingB = [] geom = QgsGeometry(inFeatA.geometry()) atMapA = inFeatA.attributes() intersects = indexA.intersects(geom.boundingBox()) if len(intersects) < 1: try: outFeat.setGeometry(geom) outFeat.setAttributes(atMapA) writer.addFeature(outFeat) except: # This really shouldn't happen, as we haven't # edited the input geom at all raise GeoAlgorithmExecutionException( self.tr('Feature exception while computing union')) else: for id in intersects: count += 1 request = QgsFeatureRequest().setFilterFid(id) inFeatB = vlayerB.getFeatures(request).next() atMapB = inFeatB.attributes() tmpGeom = QgsGeometry(inFeatB.geometry()) if geom.intersects(tmpGeom): int_geom = geom.intersection(tmpGeom) lstIntersectingB.append(tmpGeom) if int_geom is None: # There was a problem creating the intersection raise GeoAlgorithmExecutionException( self.tr('Geometry exception while computing ' 'intersection')) else: int_geom = QgsGeometry(int_geom) if int_geom.wkbType() == QGis.WKBUnknown or QgsWKBTypes.flatType(int_geom.geometry().wkbType()) == QgsWKBTypes.GeometryCollection: # Intersection produced different geomety types temp_list = int_geom.asGeometryCollection() for i in temp_list: if i.type() == geom.type(): int_geom = QgsGeometry(i) try: outFeat.setGeometry(int_geom) attrs = [] attrs.extend(atMapA) attrs.extend(atMapB) outFeat.setAttributes(attrs) writer.addFeature(outFeat) except Exception as err: raise GeoAlgorithmExecutionException( self.tr('Feature exception while computing union')) try: # the remaining bit of inFeatA's geometry # if there is nothing left, this will just silently fail and we're good diff_geom = QgsGeometry(geom) if len(lstIntersectingB) != 0: intB = QgsGeometry.unaryUnion(lstIntersectingB) diff_geom = 
diff_geom.difference(intB) if diff_geom.wkbType() == 0 or QgsWKBTypes.flatType(int_geom.geometry().wkbType()) == QgsWKBTypes.GeometryCollection: temp_list = diff_geom.asGeometryCollection() for i in temp_list: if i.type() == geom.type(): diff_geom = QgsGeometry(i) outFeat.setGeometry(diff_geom) outFeat.setAttributes(atMapA) writer.addFeature(outFeat) except Exception as err: raise GeoAlgorithmExecutionException( self.tr('Feature exception while computing union')) length = len(vproviderA.fields()) featuresA = vector.features(vlayerB) nFeat = len(featuresA) for inFeatA in featuresA: progress.setPercentage(nElement / float(nFeat) * 100) add = False geom = QgsGeometry(inFeatA.geometry()) diff_geom = QgsGeometry(geom) atMap = [None] * length atMap.extend(inFeatA.attributes()) intersects = indexB.intersects(geom.boundingBox()) if len(intersects) < 1: try: outFeat.setGeometry(geom) outFeat.setAttributes(atMap) writer.addFeature(outFeat) except Exception as err: raise GeoAlgorithmExecutionException( self.tr('Feature exception while computing union')) else: for id in intersects: request = QgsFeatureRequest().setFilterFid(id) inFeatB = vlayerA.getFeatures(request).next() atMapB = inFeatB.attributes() tmpGeom = QgsGeometry(inFeatB.geometry()) try: if diff_geom.intersects(tmpGeom): add = True diff_geom = QgsGeometry( diff_geom.difference(tmpGeom)) else: # Ihis only happends if the bounding box # intersects, but the geometry doesn't outFeat.setGeometry(diff_geom) outFeat.setAttributes(atMap) writer.addFeature(outFeat) except Exception as err: raise GeoAlgorithmExecutionException( self.tr('Geometry exception while computing intersection')) if add: try: outFeat.setGeometry(diff_geom) outFeat.setAttributes(atMap) writer.addFeature(outFeat) except Exception as err: raise err FEATURE_EXCEPT = False nElement += 1 del writer if not GEOS_EXCEPT: ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Geometry exception while computing intersection')) if not FEATURE_EXCEPT: 
ProcessingLog.addToLog(ProcessingLog.LOG_WARNING, self.tr('Feature exception while computing intersection')) def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('Union') self.group, self.i18n_group = self.trAlgorithm('Vector overlay tools') self.addParameter(ParameterVector(Union.INPUT, self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY])) self.addParameter(ParameterVector(Union.INPUT2, self.tr('Input layer 2'), [ParameterVector.VECTOR_TYPE_ANY])) self.addOutput(OutputVector(Union.OUTPUT, self.tr('Union'))) ```
[ { "content": "Replicate the source code:\n```python\n#===========================================================================\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of version 2.1 of the GNU Lesser General Public\n# License as published by the Free Soft...
[ { "content": "Replicate the source code:\n<|memory_start|>```python\n#===========================================================================\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of version 2.1 of the GNU Lesser General Public\n# License as published ...
```python #=========================================================================== # This library is free software; you can redistribute it and/or # modify it under the terms of version 2.1 of the GNU Lesser General Public # License as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #============================================================================ # Copyright (C) 2006-2007 XenSource Ltd #============================================================================ import logging import os import re import time import types from xen.xend import sxp from xen.xend import uuid from xen.xend import XendOptions from xen.xend import XendAPIStore from xen.xend.XendPPCI import XendPPCI from xen.xend.XendDPCI import XendDPCI from xen.xend.XendPSCSI import XendPSCSI from xen.xend.XendDSCSI import XendDSCSI, XendDSCSI_HBA from xen.xend.XendError import VmError from xen.xend.XendDevices import XendDevices from xen.xend.PrettyPrint import prettyprintstring from xen.xend.XendConstants import DOM_STATE_HALTED, AUTO_PHP_SLOT from xen.xend.xenstore.xstransact import xstransact from xen.xend.server.BlktapController import blktap_disk_types from xen.xend.server.netif import randomMAC from xen.util.blkif import blkdev_name_to_number, blkdev_uname_to_file from xen.util.pci import pci_opts_list_from_sxp, pci_convert_sxp_to_dict from xen.xend.XendSXPDev import dev_dict_to_sxp from xen.util import xsconstants from xen.util import auxbin from xen.xend.XendAPIConstants import * import xen.util.fileuri log = 
logging.getLogger("xend.XendConfig") log.setLevel(logging.WARN) """ XendConfig API XendConfig will try to mirror as closely the Xen API VM Struct with extra parameters for those options that are not supported. """ def reverse_dict(adict): """Return the reverse mapping of a dictionary.""" return dict([(v, k) for k, v in adict.items()]) def bool0(v): return v != '0' and v != 'False' and bool(v) def convert_on_crash(v): v = str(v) if v in XEN_API_ON_CRASH_BEHAVIOUR: return XEN_API_ON_CRASH_BEHAVIOUR_LEGACY[v] else: return v # Recursively copy a data struct, scrubbing out VNC passwords. # Will scrub any dict entry with a key of 'vncpasswd' or any # 2-element list whose first member is 'vncpasswd'. It will # also scrub a string matching '(vncpasswd XYZ)'. Everything # else is no-op passthrough def scrub_password(data): if type(data) == dict or type(data) == XendConfig: scrubbed = {} for key in data.keys(): if key == "vncpasswd": scrubbed[key] = "XXXXXXXX" else: scrubbed[key] = scrub_password(data[key]) return scrubbed elif type(data) == list: if len(data) == 2 and type(data[0]) == str and data[0] == 'vncpasswd': return ['vncpasswd', 'XXXXXXXX'] else: scrubbed = [] for entry in data: scrubbed.append(scrub_password(entry)) return scrubbed elif type(data) == tuple: scrubbed = [] for entry in data: scrubbed.append(scrub_password(entry)) return tuple(scrubbed) elif type(data) == str: return re.sub(r'\(vncpasswd\s+[^\)]+\)','(vncpasswd XXXXXX)', data) else: return data # # CPU fields: # # VCPUs_max -- the maximum number of vcpus that this domain may ever have. # aka XendDomainInfo.getVCpuCount(). # vcpus -- the legacy configuration name for above. # max_vcpu_id -- vcpus_number - 1. This is given to us by Xen. # # cpus -- the list of pCPUs available to each vCPU. # # vcpu_avail -- a bitmap telling the guest domain whether it may use each of # its VCPUs. This is translated to # <dompath>/cpu/<id>/availability = {online,offline} for use # by the guest domain. 
# VCPUs_live -- the number of VCPUs currently up, as reported by Xen. This # is changed by changing vcpu_avail, and waiting for the # domain to respond. # # Mapping from XendConfig configuration keys to the old # legacy configuration keys that map directly. XENAPI_CFG_TO_LEGACY_CFG = { 'uuid': 'uuid', 'VCPUs_max': 'vcpus', 'cpus': 'cpus', 'name_label': 'name', 'actions_after_shutdown': 'on_poweroff', 'actions_after_reboot': 'on_reboot', 'actions_after_crash': 'on_crash', 'PV_bootloader': 'bootloader', 'PV_bootloader_args': 'bootloader_args', 'Description': 'description', 'pool_name' : 'pool_name', } LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG) # Platform configuration keys and their types. XENAPI_PLATFORM_CFG_TYPES = { 'acpi': int, 'apic': int, 'boot': str, 'device_model': str, 'actmem': str, 'xenpaging_file': str, 'xenpaging_extra': str, 'loader': str, 'display' : str, 'fda': str, 'fdb': str, 'keymap': str, 'isa' : int, 'localtime': int, 'monitor': int, 'monitor_path': str, 'nographic': int, 'nomigrate': int, 'pae' : int, 'extid': int, 'rtc_timeoffset': int, 'parallel': str, 'serial': str, 'sdl': int, 'opengl': int, 'soundhw': str, 'stdvga': int, 'videoram': int, 'usb': int, 'usbdevice': str, 'hpet': int, 'vnc': int, 'vncconsole': int, 'vncdisplay': int, 'vnclisten': str, 'timer_mode': int, 'tsc_mode': int, 'vpt_align': int, 'viridian': int, 'vncpasswd': str, 'vncunused': int, 'xauthority': str, 'pci': str, 'vhpt': int, 'guest_os_type': str, 'hap': int, 'xen_extended_power_mgmt': int, 'pci_msitranslate': int, 'pci_power_mgmt': int, 'xen_platform_pci': int, "gfx_passthru": int, 'oos' : int, 'watchdog': str, 'watchdog_action': str, } # Xen API console 'other_config' keys. XENAPI_CONSOLE_OTHER_CFG = ['vncunused', 'vncdisplay', 'vnclisten', 'vncpasswd', 'sdl', 'vnc', 'display', 'xauthority', 'keymap', 'opengl'] # List of XendConfig configuration keys that have no direct equivalent # in the old world. 
XENAPI_CFG_TYPES = { 'uuid': str, 'name_label': str, 'name_description': str, 'user_version': str, 'is_a_template': bool0, 'auto_power_on': bool0, 'resident_on': str, 'memory_static_min': int, # note these are stored in bytes, not KB! 'memory_static_max': int, 'memory_dynamic_min': int, 'memory_dynamic_max': int, 'cpus': list, 'vcpus_params': dict, 'VCPUs_max': int, 'VCPUs_at_startup': int, 'VCPUs_live': int, 'actions_after_shutdown': str, 'actions_after_reboot': str, 'actions_after_crash': convert_on_crash, 'PV_bootloader': str, 'PV_kernel': str, 'PV_ramdisk': str, 'PV_args': str, 'PV_bootloader_args': str, 'HVM_boot_policy': str, 'HVM_boot_params': dict, 'PCI_bus': str, 'platform': dict, 'tools_version': dict, 'other_config': dict, 'target': int, 'security_label': str, 'pci': str, 'cpuid' : dict, 'cpuid_check' : dict, 'machine_address_size': int, 'suppress_spurious_page_faults': bool0, 's3_integrity' : int, 'superpages' : int, 'memory_sharing': int, 'pool_name' : str, 'Description': str, 'snapshotname': str, } # List of legacy configuration keys that have no equivalent in the # Xen API, but are still stored in XendConfig. LEGACY_UNSUPPORTED_BY_XENAPI_CFG = [ # roundtripped (dynamic, unmodified) 'shadow_memory', 'vcpu_avail', 'features', # read/write 'on_xend_start', 'on_xend_stop', # read-only 'domid', 'start_time', 'cpu_time', 'online_vcpus', # write-once 'cpu', 'cpus', ] LEGACY_CFG_TYPES = { 'uuid': str, 'name': str, 'vcpus': int, 'vcpu_avail': long, 'memory': int, 'shadow_memory': int, 'maxmem': int, 'start_time': float, 'cpu_time': float, 'features': str, 'localtime': int, 'on_poweroff': str, 'on_reboot': str, 'on_crash': str, 'on_xend_stop': str, 'on_xend_start': str, 'online_vcpus': int, 'rtc/timeoffset': str, 'bootloader': str, 'bootloader_args': str, 'description': str, 'pool_name': str, } # Values that should be stored in xenstore's /vm/<uuid> that is used # by Xend. Used in XendDomainInfo to restore running VM state from # xenstore. 
LEGACY_XENSTORE_VM_PARAMS = [ 'uuid', 'name', 'vcpus', 'vcpu_avail', 'memory', 'shadow_memory', 'maxmem', 'start_time', 'on_poweroff', 'on_crash', 'on_reboot', 'on_xend_start', 'on_xend_stop', 'bootloader', 'bootloader_args', 'pool_name', ] ## ## Config Choices ## CONFIG_RESTART_MODES = ('restart', 'destroy', 'preserve', 'rename-restart', 'coredump-destroy', 'coredump-restart') CONFIG_OLD_DOM_STATES = ('running', 'blocked', 'paused', 'shutdown', 'crashed', 'dying') class XendConfigError(VmError): def __str__(self): return 'Invalid Configuration: %s' % str(self.value) ## ## XendConfig Class (an extended dictionary) ## class XendConfig(dict): """ The new Xend VM Configuration. Stores the configuration in xenapi compatible format but retains import and export functions for SXP. """ def __init__(self, filename = None, sxp_obj = None, xapi = None, dominfo = None): dict.__init__(self) self.update(self._defaults()) if filename: try: sxp_obj = sxp.parse(open(filename,'r')) sxp_obj = sxp_obj[0] except IOError, e: raise XendConfigError("Unable to read file: %s" % filename) if sxp_obj: self._sxp_to_xapi(sxp_obj) self._sxp_to_xapi_unsupported(sxp_obj) elif xapi: self.update_with_xenapi_config(xapi) elif dominfo: # output from xc.domain_getinfo self._dominfo_to_xapi(dominfo, update_mem = True) self.handle_fileuris() log.debug('XendConfig.init: %s' % scrub_password(self)) # validators go here self.validate() """ In time, we should enable this type checking addition. It is great also for tracking bugs and unintended writes to XendDomainInfo.info def __setitem__(self, key, value): type_conv = XENAPI_CFG_TYPES.get(key) if callable(type_conv): try: dict.__setitem__(self, key, type_conv(value)) except (ValueError, TypeError): raise XendConfigError("Wrong type for configuration value " + "%s. 
Expected %s" % (key, type_conv.__name__)) else: dict.__setitem__(self, key, value) """ def _defaults(self): defaults = { 'name_label': 'Domain-Unnamed', 'actions_after_shutdown': 'destroy', 'actions_after_reboot': 'restart', 'actions_after_crash': 'restart', 'actions_after_suspend': '', 'is_a_template': False, 'auto_power_on': False, 'is_control_domain': False, 'features': '', 'PV_bootloader': '', 'PV_kernel': '', 'PV_ramdisk': '', 'PV_args': '', 'PV_bootloader_args': '', 'HVM_boot_policy': '', 'HVM_boot_params': {}, 'memory_static_min': 0, 'memory_dynamic_min': 0, 'shadow_memory': 0, 'memory_static_max': 0, 'memory_dynamic_max': 0, 'memory_sharing': 0, 'devices': {}, 'on_xend_start': 'ignore', 'on_xend_stop': 'ignore', 'cpus': [], 'VCPUs_max': 1, 'VCPUs_live': 1, 'VCPUs_at_startup': 1, 'vcpus_params': {}, 'console_refs': [], 'vif_refs': [], 'vbd_refs': [], 'vtpm_refs': [], 'other_config': {}, 'platform': {}, 'target': 0, 'pool_name' : 'Pool-0', 'superpages': 0, 'description': '', } return defaults # # Here we assume these values exist in the dict. 
# If they don't we have a bigger problem, lets not # try and 'fix it up' but acutually fix the cause ;-) # def _memory_sanity_check(self): log.trace("_memory_sanity_check memory_static_min: %s, " "memory_static_max: %i, " "memory_dynamic_min: %i, " "memory_dynamic_max: %i", self["memory_static_min"], self["memory_static_max"], self["memory_dynamic_min"], self["memory_dynamic_max"]) if not self["memory_static_min"] <= self["memory_static_max"]: raise XendConfigError("memory_static_min must be less " \ "than or equal to memory_static_max") if not self["memory_static_min"] <= self["memory_dynamic_min"]: raise XendConfigError("memory_static_min must be less " \ "than or equal to memory_dynamic_min") if not self["memory_dynamic_max"] <= self["memory_static_max"]: raise XendConfigError("memory_dynamic_max must be less " \ "than or equal to memory_static_max") if not self["memory_dynamic_max"] > 0: raise XendConfigError("memory_dynamic_max must be greater " \ "than zero") if not self["memory_static_max"] > 0: raise XendConfigError("memory_static_max must be greater " \ "than zero") if self["memory_sharing"] and not self.is_hvm(): raise XendConfigError("memory_sharing can only be enabled " \ "for HVM domains") if self["memory_sharing"] and not self.is_hap(): raise XendConfigError("memory_sharing can only be enabled " \ "for HAP enabled boxes") def _actions_sanity_check(self): for event in ['shutdown', 'reboot', 'crash']: if self['actions_after_' + event] not in CONFIG_RESTART_MODES: raise XendConfigError('Invalid event handling mode: ' + event) def _vcpus_sanity_check(self): if 'VCPUs_max' in self and 'vcpu_avail' not in self: self['vcpu_avail'] = (1 << self['VCPUs_max']) - 1 if 'online_vcpus' in self: self['VCPUs_live'] = self['online_vcpus'] def _uuid_sanity_check(self): """Make sure UUID is in proper string format with hyphens.""" if 'uuid' not in self or not self['uuid']: self['uuid'] = uuid.createString() else: self['uuid'] = 
uuid.toString(uuid.fromString(self['uuid'])) def _name_sanity_check(self): if 'name_label' not in self: self['name_label'] = 'Domain-' + self['uuid'] def _platform_sanity_check(self): if 'keymap' not in self['platform'] and XendOptions.instance().get_keymap(): self['platform']['keymap'] = XendOptions.instance().get_keymap() if self.is_hvm() or self.has_rfb(): if 'device_model' not in self['platform']: self['platform']['device_model'] = auxbin.pathTo("qemu-dm") # device_model may be set to 'qemu-dm' or 'stubdom-dm' w/o a path if os.path.dirname(self['platform']['device_model']) == "": self['platform']['device_model'] = \ auxbin.pathTo(self['platform']['device_model']) # If the device_model is not set the os.path.exists() would raise # an exception so we return our error message instead if applicable if not self['platform']['device_model']: raise VmError("No valid device model specified") if not os.path.exists(self['platform']['device_model']): raise VmError("device model '%s' not found" % str(self['platform']['device_model'])) if 'tsc_mode' not in self['platform']: self['platform']['tsc_mode'] = 0 if 'nomigrate' not in self['platform']: self['platform']['nomigrate'] = 0 if self.is_hvm(): if 'actmem' not in self['platform']: self['platform']['actmem'] = "0" if 'xenpaging_file' not in self['platform']: self['platform']['xenpaging_file'] = "" if 'xenpaging_extra' not in self['platform']: self['platform']['xenpaging_extra'] = [] if 'timer_mode' not in self['platform']: self['platform']['timer_mode'] = 1 if 'extid' in self['platform'] and int(self['platform']['extid']) == 1: self['platform']['viridian'] = 1 if 'viridian' not in self['platform']: self['platform']['viridian'] = 0 if 'rtc_timeoffset' not in self['platform']: self['platform']['rtc_timeoffset'] = 0 if 'hpet' not in self['platform']: self['platform']['hpet'] = 0 if 'xen_platform_pci' not in self['platform']: self['platform']['xen_platform_pci'] = 1 if 'vpt_align' not in self['platform']: 
self['platform']['vpt_align'] = 1 if 'loader' not in self['platform']: # Old configs may have hvmloader set as PV_kernel param if self.has_key('PV_kernel') and self['PV_kernel'] != '': if self['PV_kernel'] == 'hvmloader': self['PV_kernel'] = auxbin.pathTo("hvmloader") self['platform']['loader'] = self['PV_kernel'] self['PV_kernel'] = '' else: self['platform']['loader'] = auxbin.pathTo("hvmloader") log.debug("Loader is %s" % str(self['platform']['loader'])) elif self['platform']['loader'] == 'hvmloader': self['platform']['loader'] = auxbin.pathTo("hvmloader") if not os.path.exists(self['platform']['loader']): raise VmError("kernel '%s' not found" % str(self['platform']['loader'])) # Compatibility hack, can go away soon. if 'soundhw' not in self['platform'] and \ self['platform'].get('enable_audio'): self['platform']['soundhw'] = 'sb16' def _vfb_sanity_check(self): if 'keymap' in self['platform']: for con in self['console_refs']: if self['devices'][con][0] == 'vfb': if 'keymap' not in self['devices'][con][1]: self['devices'][con][1]['keymap'] = \ self['platform']['keymap'] def validate(self): self._uuid_sanity_check() self._name_sanity_check() self._memory_sanity_check() self._actions_sanity_check() self._vcpus_sanity_check() self._platform_sanity_check() self._vfb_sanity_check() def _dominfo_to_xapi(self, dominfo, update_mem = False): self['domid'] = dominfo['domid'] self['online_vcpus'] = dominfo['online_vcpus'] self['VCPUs_max'] = dominfo['max_vcpu_id'] + 1 if update_mem: self['memory_dynamic_min'] = dominfo['mem_kb'] * 1024 self['memory_dynamic_max'] = dominfo['mem_kb'] * 1024 self['memory_static_min'] = 0 self['memory_static_max'] = dominfo['maxmem_kb'] * 1024 self._memory_sanity_check() self['cpu_time'] = dominfo['cpu_time']/1e9 if dominfo.get('ssidref'): ssidref = int(dominfo.get('ssidref')) import xen.util.xsm.xsm as security self['security_label'] = security.ssidref2security_label(ssidref) self['shutdown_reason'] = dominfo['shutdown_reason'] # parse state 
into Xen API states self['running'] = dominfo['running'] self['crashed'] = dominfo['crashed'] self['dying'] = dominfo['dying'] self['shutdown'] = dominfo['shutdown'] self['paused'] = dominfo['paused'] self['blocked'] = dominfo['blocked'] if 'name' in dominfo: self['name_label'] = dominfo['name'] if 'handle' in dominfo: self['uuid'] = uuid.toString(dominfo['handle']) def _convert_cpus_to_list(self, s): # Convert the following string to list of ints. # The string supports a list of ranges (0-3), # seperated by commas, and negation (^1). # Precedence is settled by order of the string: # "0-3,^1" -> [0,2,3] # "0-3,^1,1" -> [0,1,2,3] l = [] if s == "": return l for c in s.split(','): if c.find('-') != -1: (x, y) = c.split('-') for i in range(int(x), int(y)+1): l.append(int(i)) else: # remove this element from the list if c[0] == '^': l = [x for x in l if x != int(c[1:])] else: l.append(int(c)) return l def parse_cpuid(self, cfg, field): def int2bin(n, count=32): return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)]) for input, regs in cfg[field].iteritems(): if not regs is dict: cfg[field][input] = dict(regs) cpuid = {} for input in cfg[field]: inputs = input.split(',') if inputs[0][0:2] == '0x': inputs[0] = str(int(inputs[0], 16)) if len(inputs) == 2: if inputs[1][0:2] == '0x': inputs[1] = str(int(inputs[1], 16)) new_input = ','.join(inputs) cpuid[new_input] = {} # new input for reg in cfg[field][input]: val = cfg[field][input][reg] if val[0:2] == '0x': cpuid[new_input][reg] = int2bin(int(val, 16)) else: cpuid[new_input][reg] = val cfg[field] = cpuid def _parse_sxp(self, sxp_cfg): """ Populate this XendConfig using the parsed SXP. @param sxp_cfg: Parsed SXP Configuration @type sxp_cfg: list of lists @rtype: dictionary @return: A dictionary containing the parsed options of the SXP. 
""" cfg = {} for key, typ in XENAPI_CFG_TYPES.items(): val = sxp.child_value(sxp_cfg, key) if val is not None: try: cfg[key] = typ(val) except (ValueError, TypeError), e: log.warn('Unable to convert type value for key: %s' % key) # Convert deprecated options to current equivalents. restart = sxp.child_value(sxp_cfg, 'restart') if restart: if restart == 'onreboot': cfg['on_poweroff'] = 'destroy' cfg['on_reboot'] = 'restart' cfg['on_crash'] = 'destroy' elif restart == 'always': for opt in ('on_poweroff', 'on_reboot', 'on_crash'): cfg[opt] = 'restart' elif restart == 'never': for opt in ('on_poweroff', 'on_reboot', 'on_crash'): cfg[opt] = 'never' else: log.warn('Ignoring unrecognised value for deprecated option:' 'restart = \'%s\'', restart) # Handle memory, passed in as MiB if sxp.child_value(sxp_cfg, "memory") != None: cfg["memory"] = int(sxp.child_value(sxp_cfg, "memory")) if sxp.child_value(sxp_cfg, "maxmem") != None: cfg["maxmem"] = int(sxp.child_value(sxp_cfg, "maxmem")) # Convert scheduling parameters to vcpus_params if 'vcpus_params' not in cfg: cfg['vcpus_params'] = {} if not cfg["vcpus_params"].has_key("weight"): cfg["vcpus_params"]["weight"] = \ int(sxp.child_value(sxp_cfg, "cpu_weight", 256)) else: cfg["vcpus_params"]["weight"] = int(cfg["vcpus_params"]["weight"]) if not cfg["vcpus_params"].has_key("cap"): cfg["vcpus_params"]["cap"] = \ int(sxp.child_value(sxp_cfg, "cpu_cap", 0)) else: cfg["vcpus_params"]["cap"] = int(cfg["vcpus_params"]["cap"]) # Only extract options we know about. 
extract_keys = LEGACY_UNSUPPORTED_BY_XENAPI_CFG + \ XENAPI_CFG_TO_LEGACY_CFG.values() for key in extract_keys: val = sxp.child_value(sxp_cfg, key) if val != None: try: cfg[key] = LEGACY_CFG_TYPES[key](val) except KeyError: cfg[key] = val except (TypeError, ValueError), e: log.warn("Unable to parse key %s: %s: %s" % (key, str(val), e)) if 'platform' not in cfg: cfg['platform'] = {} localtime = sxp.child_value(sxp_cfg, 'localtime') if localtime is not None: cfg['platform']['localtime'] = localtime # Compatibility hack -- can go soon. for key in XENAPI_PLATFORM_CFG_TYPES.keys(): val = sxp.child_value(sxp_cfg, "platform_" + key, None) if val is not None: self['platform'][key] = val # Compatibility hack -- can go soon. boot_order = sxp.child_value(sxp_cfg, 'HVM_boot') if boot_order: cfg['HVM_boot_policy'] = 'BIOS order' cfg['HVM_boot_params'] = { 'order' : boot_order } # Parsing the device SXP's. cfg['devices'] = {} for dev in sxp.children(sxp_cfg, 'device'): config = sxp.child0(dev) dev_type = sxp.name(config) self.device_add(dev_type, cfg_sxp = config, target = cfg) # Extract missing data from configuration entries image_sxp = sxp.child_value(sxp_cfg, 'image', []) if image_sxp: image_vcpus = sxp.child_value(image_sxp, 'vcpus') if image_vcpus != None: try: if 'VCPUs_max' not in cfg: cfg['VCPUs_max'] = int(image_vcpus) elif cfg['VCPUs_max'] != int(image_vcpus): cfg['VCPUs_max'] = int(image_vcpus) log.warn('Overriding vcpus from %d to %d using image' 'vcpus value.', cfg['VCPUs_max']) except ValueError, e: raise XendConfigError('integer expeceted: %s: %s' % image_sxp, e) # Deprecated cpu configuration if 'cpu' in cfg: if 'cpus' in cfg: cfg['cpus'] = "%s,%s" % (str(cfg['cpu']), cfg['cpus']) else: cfg['cpus'] = str(cfg['cpu']) # Convert 'cpus' to list of list of ints cpus_list = [] if 'cpus' in cfg: if type(cfg['cpus']) == list: if len(cfg['cpus']) > 0 and type(cfg['cpus'][0]) == list: # If sxp_cfg was created from config.sxp, # the form of 'cpus' is list of list of string. 
# Convert 'cpus' to list of list of ints. # Conversion examples: # [['1']] -> [[1]] # [['0','2'],['1','3']] -> [[0,2],[1,3]] try: for c1 in cfg['cpus']: cpus = [] for c2 in c1: cpus.append(int(c2)) cpus_list.append(cpus) except ValueError, e: raise XendConfigError('cpus = %s: %s' % (cfg['cpus'], e)) else: # Conversion examples: # ["1"] -> [[1]] # ["0,2","1,3"] -> [[0,2],[1,3]] # ["0-3,^1","1-4,^2"] -> [[0,2,3],[1,3,4]] try: for c in cfg['cpus']: cpus = self._convert_cpus_to_list(c) cpus_list.append(cpus) except ValueError, e: raise XendConfigError('cpus = %s: %s' % (cfg['cpus'], e)) if len(cpus_list) != cfg['vcpus']: raise XendConfigError('vcpus and the item number of cpus are not same') else: # Conversion examples: # vcpus=1: # "1" -> [[1]] # "0-3,^1" -> [[0,2,3]] # vcpus=2: # "1" -> [[1],[1]] # "0-3,^1" -> [[0,2,3],[0,2,3]] try: cpus = self._convert_cpus_to_list(cfg['cpus']) for v in range(0, cfg['vcpus']): cpus_list.append(cpus) except ValueError, e: raise XendConfigError('cpus = %s: %s' % (cfg['cpus'], e)) else: # Generation examples: # vcpus=1: # -> [[]] # vcpus=2: # -> [[],[]] for v in range(0, cfg['vcpus']): cpus_list.append(list()) cfg['cpus'] = cpus_list # Parse cpuid if 'cpuid' in cfg: self.parse_cpuid(cfg, 'cpuid') if 'cpuid_check' in cfg: self.parse_cpuid(cfg, 'cpuid_check') import xen.util.xsm.xsm as security if security.on() == xsconstants.XS_POLICY_USE: from xen.util.acmpolicy import ACM_LABEL_UNLABELED if not 'security' in cfg and sxp.child_value(sxp_cfg, 'security'): cfg['security'] = sxp.child_value(sxp_cfg, 'security') elif not cfg.get('security_label'): cfg['security'] = [['access_control', ['policy', security.get_active_policy_name() ], ['label', ACM_LABEL_UNLABELED ]]] if 'security' in cfg and not cfg.get('security_label'): secinfo = cfg['security'] # The xm command sends a list formatted like this: # [['access_control', ['policy', 'xm-test'],['label', 'red']], # ['ssidref', 196611]] policy = "" label = "" for idx in range(0, len(secinfo)): if 
secinfo[idx][0] == "access_control": for aidx in range(1, len(secinfo[idx])): if secinfo[idx][aidx][0] == "policy": policy = secinfo[idx][aidx][1] if secinfo[idx][aidx][0] == "label": label = secinfo[idx][aidx][1] cfg['security_label'] = \ security.set_security_label(policy, label) if not sxp.child_value(sxp_cfg, 'security_label'): del cfg['security'] old_state = sxp.child_value(sxp_cfg, 'state') if old_state: for i in range(len(CONFIG_OLD_DOM_STATES)): cfg[CONFIG_OLD_DOM_STATES[i]] = int(old_state[i] != '-') return cfg def _sxp_to_xapi(self, sxp_cfg): """Read in an SXP Configuration object and populate at much of the Xen API with valid values. """ log.debug('_sxp_to_xapi(%s)' % scrub_password(sxp_cfg)) # _parse_sxp() below will call device_add() and construct devices. # Some devices may require VM's uuid, so setup self['uuid'] # beforehand. self['uuid'] = sxp.child_value(sxp_cfg, 'uuid', uuid.createString()) cfg = self._parse_sxp(sxp_cfg) for key, typ in XENAPI_CFG_TYPES.items(): val = cfg.get(key) if val is not None: self[key] = typ(val) # Convert parameters that can be directly mapped from # the Legacy Config to Xen API Config for apikey, cfgkey in XENAPI_CFG_TO_LEGACY_CFG.items(): try: type_conv = XENAPI_CFG_TYPES.get(apikey) if callable(type_conv): self[apikey] = type_conv(cfg[cfgkey]) else: log.warn("Unconverted key: " + apikey) self[apikey] = cfg[cfgkey] except KeyError: pass # Lets try and handle memory correctly MiB = 1024 * 1024 if "memory" in cfg: self["memory_static_min"] = 0 self["memory_static_max"] = int(cfg["memory"]) * MiB self["memory_dynamic_min"] = int(cfg["memory"]) * MiB self["memory_dynamic_max"] = int(cfg["memory"]) * MiB if "maxmem" in cfg: self["memory_static_max"] = int(cfg["maxmem"]) * MiB self._memory_sanity_check() def update_with(n, o): if not self.get(n): self[n] = cfg.get(o, '') update_with('PV_bootloader', 'bootloader') update_with('PV_bootloader_args', 'bootloader_args') update_with('Description', 'description') image_sxp = 
sxp.child_value(sxp_cfg, 'image', []) if image_sxp: self.update_with_image_sxp(image_sxp) # Convert Legacy HVM parameters to Xen API configuration for key in XENAPI_PLATFORM_CFG_TYPES.keys(): if key in cfg: self['platform'][key] = cfg[key] # set device references in the configuration self['devices'] = cfg.get('devices', {}) self['console_refs'] = cfg.get('console_refs', []) self['vif_refs'] = cfg.get('vif_refs', []) self['vbd_refs'] = cfg.get('vbd_refs', []) self['vtpm_refs'] = cfg.get('vtpm_refs', []) # coalesce hvm vnc frame buffer with vfb config if self.is_hvm() and int(self['platform'].get('vnc', 0)) != 0: # add vfb device if it isn't there already if not self.has_rfb(): dev_config = ['vfb'] dev_config.append(['vnc', '1']) # copy VNC related params from platform config to vfb dev conf for key in ['vncpasswd', 'vncunused', 'vncdisplay', 'vnclisten', 'keymap']: if key in self['platform']: dev_config.append([key, self['platform'][key]]) self.device_add('vfb', cfg_sxp = dev_config) def has_rfb(self): for console_uuid in self['console_refs']: if self['devices'][console_uuid][1].get('protocol') == 'rfb': return True if self['devices'][console_uuid][0] == 'vfb': return True return False def _sxp_to_xapi_unsupported(self, sxp_cfg): """Read in an SXP configuration object and populate values are that not related directly supported in the Xen API. 
""" log.debug('_sxp_to_xapi_unsupported(%s)' % scrub_password(sxp_cfg)) # Parse and convert parameters used to configure # the image (as well as HVM images) image_sxp = sxp.child_value(sxp_cfg, 'image', []) if image_sxp: image_type = sxp.name(image_sxp) if image_type != 'hvm' and image_type != 'linux': self['platform']['image_type'] = image_type for key in XENAPI_PLATFORM_CFG_TYPES.keys(): val = sxp.child_value(image_sxp, key, None) if val is not None and val != '': self['platform'][key] = val notes = sxp.children(image_sxp, 'notes') if notes: self['notes'] = self.notes_from_sxp(notes[0]) self._hvm_boot_params_from_sxp(image_sxp) # extract backend value backend = [] for c in sxp.children(sxp_cfg, 'backend'): backend.append(sxp.name(sxp.child0(c))) if backend: self['backend'] = backend # Parse and convert other Non Xen API parameters. def _set_cfg_if_exists(sxp_arg): val = sxp.child_value(sxp_cfg, sxp_arg) if val != None: if LEGACY_CFG_TYPES.get(sxp_arg): self[sxp_arg] = LEGACY_CFG_TYPES[sxp_arg](val) else: self[sxp_arg] = val _set_cfg_if_exists('shadow_memory') _set_cfg_if_exists('features') _set_cfg_if_exists('on_xend_stop') _set_cfg_if_exists('on_xend_start') _set_cfg_if_exists('vcpu_avail') _set_cfg_if_exists('change_home_server') _set_cfg_if_exists('description') # Parse and store runtime configuration _set_cfg_if_exists('start_time') _set_cfg_if_exists('cpu_time') _set_cfg_if_exists('shutdown_reason') _set_cfg_if_exists('up_time') _set_cfg_if_exists('status') # TODO, deprecated def _get_old_state_string(self): """Returns the old xm state string. @rtype: string @return: old state string """ state_string = '' for state_name in CONFIG_OLD_DOM_STATES: on_off = self.get(state_name, 0) if on_off: state_string += state_name[0] else: state_string += '-' return state_string def update_config(self, dominfo): """Update configuration with the output from xc.domain_getinfo(). 
        @param dominfo: Domain information via xc.domain_getinfo()
        @type dominfo: dict
        """
        self._dominfo_to_xapi(dominfo)
        self.validate()

    def update_with_xenapi_config(self, xapi):
        """Update configuration with a Xen API VM struct

        @param xapi: Xen API VM Struct
        @type xapi: dict
        """

        log.debug('update_with_xenapi_config: %s' % scrub_password(xapi))

        for key, val in xapi.items():
            type_conv = XENAPI_CFG_TYPES.get(key)
            # Retry the lookup with a lower-cased key before giving up on
            # finding a declared coercion for this field.
            if type_conv is None:
                key = key.lower()
                type_conv = XENAPI_CFG_TYPES.get(key)
            if callable(type_conv):
                self[key] = type_conv(val)
            else:
                self[key] = val

        # XenAPI defines platform as a string-string map.  If platform
        # configuration exists, convert values to appropriate type.
        if 'platform' in xapi:
            for key, val in xapi['platform'].items():
                type_conv = XENAPI_PLATFORM_CFG_TYPES.get(key)
                if type_conv is None:
                    key = key.lower()
                    type_conv = XENAPI_PLATFORM_CFG_TYPES.get(key)
                if callable(type_conv):
                    self['platform'][key] = type_conv(val)
                else:
                    self['platform'][key] = val

        self['vcpus_params']['weight'] = \
            int(self['vcpus_params'].get('weight', 256))
        self['vcpus_params']['cap'] = \
            int(self['vcpus_params'].get('cap', 0))

        # cpumap* entries are normalised to a comma-separated string of
        # CPU numbers.
        for key, val in self['vcpus_params'].items():
            if key.startswith('cpumap'):
                self['vcpus_params'][key] = \
                    ','.join(map(str, self._convert_cpus_to_list(val)))

    def cpuid_to_sxp(self, sxpr, field):
        """Append the cpuid dict stored under self[field] to sxpr as
        [field, [[input, [[reg, val], ...]], ...]]."""
        regs_list = []
        for input, regs in self[field].iteritems():
            reg_list = []
            for reg, val in regs.iteritems():
                reg_list.append([reg, val])
            regs_list.append([input, reg_list])
        sxpr.append([field, regs_list])

    def to_sxp(self, domain = None, ignore_devices = False, ignore = [],
               legacy_only = True):
        """ Get SXP representation of this config object.

        Incompat: removed store_mfn, console_mfn

        @keyword domain: (optional) XendDomainInfo to get extra information
                         from such as domid and running devices.
        @type    domain: XendDomainInfo
        @keyword ignore: (optional) list of 'keys' that we do not want
                         to export.
        @type    ignore: list of strings
        @rtype: list of list (SXP representation)
        """
        sxpr = ['domain']

        # TODO: domid/dom is the same thing but called differently
        #       depending if it is from xenstore or sxpr.

        if domain.getDomid() is not None:
            sxpr.append(['domid', domain.getDomid()])

        if legacy_only:
            sxpr.append(['cpu_weight',
                         int(self['vcpus_params'].get('weight', 256))])
            sxpr.append(['cpu_cap', int(self['vcpus_params'].get('cap', 0))])
        else:
            for name, typ in XENAPI_CFG_TYPES.items():
                if name in self and self[name] not in (None, []):
                    if typ == dict:
                        s = self[name].items()
                    elif typ == list:
                        s = self[name]
                    else:
                        s = str(self[name])
                    sxpr.append([name, s])

        for xenapi, legacy in XENAPI_CFG_TO_LEGACY_CFG.items():
            if self.has_key(xenapi) and self[xenapi] not in (None, []):
                if type(self[xenapi]) == bool:
                    # convert booleans to ints before making an sxp item
                    sxpr.append([legacy, int(self[xenapi])])
                else:
                    sxpr.append([legacy, self[xenapi]])

        # Memory is exported in MiB; integer division (Python 2 '/') floors.
        MiB = 1024*1024
        sxpr.append(["maxmem", int(self["memory_static_max"])/MiB])
        sxpr.append(["memory", int(self["memory_dynamic_max"])/MiB])

        for legacy in LEGACY_UNSUPPORTED_BY_XENAPI_CFG:
            if legacy in ('domid', 'uuid', 'cpus'): # skip these
                continue
            if self.has_key(legacy) and self[legacy] not in (None, []):
                sxpr.append([legacy, self[legacy]])

        if self.has_key('security_label'):
            sxpr.append(['security_label', self['security_label']])

        sxpr.append(['image', self.image_sxpr()])
        sxpr.append(['status', domain._stateGet()])

        if domain.getDomid() is not None:
            sxpr.append(['state', self._get_old_state_string()])

        if domain:
            if domain.store_mfn:
                sxpr.append(['store_mfn', domain.store_mfn])
            if domain.console_mfn:
                sxpr.append(['console_mfn', domain.console_mfn])

        # Marshall devices (running or from configuration)
        if not ignore_devices:
            txn = xstransact()
            try:
                for cls in XendDevices.valid_devices():
                    found = False

                    # figure if there is a dev controller is valid and running
                    if domain and domain.getDomid() != None:
                        try:
                            controller = domain.getDeviceController(cls)
                            configs = controller.configurations(txn)
                            for config in configs:
                                if sxp.name(config) in ('vbd', 'tap', 'tap2'):
                                    dev_uuid = sxp.child_value(config, 'uuid')
                                    dev_type, dev_cfg = \
                                        self['devices'][dev_uuid]
                                    # Fill in bootable/VDI from the stored
                                    # device record when the running config
                                    # lacks them.
                                    if sxp.child_value(config, 'bootable',
                                                       None) is None:
                                        is_bootable = dev_cfg.get('bootable', 0)
                                        config.append(['bootable',
                                                       int(is_bootable)])
                                    config.append(['VDI',
                                                   dev_cfg.get('VDI', '')])

                                sxpr.append(['device', config])
                                found = True
                        except:
                            log.exception("dumping sxp from device controllers")
                            pass

                    # if we didn't find that device, check the existing config
                    # for a device in the same class
                    if not found:
                        for dev_type, dev_info in self.all_devices_sxpr():
                            if dev_type == cls:
                                sxpr.append(['device', dev_info])

                txn.commit()
            except:
                txn.abort()
                raise

        if 'cpuid' in self:
            self.cpuid_to_sxp(sxpr, 'cpuid')
        if 'cpuid_check' in self:
            self.cpuid_to_sxp(sxpr, 'cpuid_check')

        if self.has_key('change_home_server'):
            sxpr.append(['change_home_server', self['change_home_server']])

        log.debug(sxpr)

        return sxpr

    def _blkdev_name_to_number(self, dev):
        """Resolve a block device name (e.g. 'xvda', '51712', 'ioemu:hda')
        to its numeric device id, or None when it cannot be resolved."""
        if 'ioemu:' in dev:
            _, dev = dev.split(':', 1)
        try:
            dev, _ = dev.split(':', 1)
        except ValueError:
            pass

        try:
            devid = int(dev)
        except ValueError:
            # devid is not a number but a string containing either device
            # name (e.g. xvda) or device_type/device_id (e.g. vbd/51728)
            # (continued: take the trailing component, e.g. 51728 from
            # 'vbd/51728', then fall back to name-based lookup)
            dev2 = type(dev) is str and dev.split('/')[-1] or None
            if dev2 == None:
                log.debug("Could not check the device %s", dev)
                return None
            try:
                devid = int(dev2)
            except ValueError:
                (xenbus, devid) = blkdev_name_to_number(dev2)
                if devid == None:
                    log.debug("The device %s is not device name", dev2)
                    return None
        return devid

    def device_duplicate_check(self, dev_type, dev_info, defined_config,
                               config):
        """Raise XendConfigError if the device described by dev_info clashes
        with one already present in defined_config (same backing file, uname,
        devid or MAC).  For a cdrom devid clash, update the existing device
        instead and return its uuid; otherwise return None."""
        defined_devices_sxpr = self.all_devices_sxpr(target = defined_config)

        if dev_type == 'vbd' or dev_type == 'tap' or dev_type == 'tap2':
            dev_uname = dev_info.get('uname')
            blkdev_name = dev_info.get('dev')
            devid = self._blkdev_name_to_number(blkdev_name)
            if devid == None or dev_uname == None:
                return

            for o_dev_type, o_dev_info in defined_devices_sxpr:
                if o_dev_type == 'vbd' or o_dev_type == 'tap' \
                   or o_dev_type == 'tap2':
                    blkdev_file = blkdev_uname_to_file(dev_uname)
                    o_dev_uname = sxp.child_value(o_dev_info, 'uname')
                    if o_dev_uname and o_dev_uname != None:
                        o_blkdev_file = blkdev_uname_to_file(o_dev_uname)
                        if blkdev_file == o_blkdev_file:
                            raise XendConfigError(
                                'The file "%s" is already used' % blkdev_file)
                    if dev_uname and dev_uname == o_dev_uname:
                        raise XendConfigError(
                            'The uname "%s" is already defined' % dev_uname)
                    o_blkdev_name = sxp.child_value(o_dev_info, 'dev')
                    o_devid = self._blkdev_name_to_number(o_blkdev_name)
                    if o_devid != None and devid == o_devid:
                        name_array = blkdev_name.split(':', 2)
                        if len(name_array) == 2 and name_array[1] == 'cdrom':
                            #
                            # Since the device is a cdrom, we are most likely
                            # inserting, changing, or removing a cd.  We can
                            # update the old device instead of creating a new
                            # one.
                            #
                            if o_dev_uname != None and dev_uname == None:
                                #
                                # We are removing a cd.  We can simply update
                                # the uname on the existing device.
# merge_sxp = sxp.from_string("('vbd' ('uname' ''))") else: merge_sxp = config dev_uuid = sxp.child_value(o_dev_info, 'uuid') if dev_uuid != None and \ self.device_update(dev_uuid, cfg_sxp = merge_sxp): return dev_uuid raise XendConfigError('The device "%s" is already defined' % blkdev_name) elif dev_type == 'vif': dev_mac = dev_info.get('mac') for o_dev_type, o_dev_info in defined_devices_sxpr: if dev_type == o_dev_type: if dev_mac.lower() == sxp.child_value(o_dev_info, 'mac').lower(): raise XendConfigError('The mac "%s" is already defined' % dev_mac) return None def create_dpci_from_sxp(self, pci_devs): for pci_dev in pci_devs: dpci_uuid = pci_dev.get('uuid') log.debug("create_dpci_from_sxp: %s" % pci_dev) ppci_uuid = XendPPCI.get_by_sbdf(pci_dev['domain'], pci_dev['bus'], pci_dev['slot'], pci_dev['func']) if ppci_uuid is None: continue dpci_record = { 'VM': self['uuid'], 'PPCI': ppci_uuid, 'hotplug_slot': pci_dev.get('vdevfn', '0x%02x' % AUTO_PHP_SLOT), 'key': pci_dev['key'] } dpci_opts = pci_dev.get('opts') if dpci_opts and len(dpci_opts) > 0: dpci_record['options'] = dpci_opts XendDPCI(dpci_uuid, dpci_record) def device_add(self, dev_type, cfg_sxp = None, cfg_xenapi = None, target = None): """Add a device configuration in SXP format or XenAPI struct format. For SXP, it could be either: [device, [vbd, [uname ...]] or: [vbd, [uname ..]] @type cfg_sxp: list of lists (parsed sxp object) @param cfg_sxp: SXP configuration object @type cfg_xenapi: dict @param cfg_xenapi: A device configuration from Xen API (eg. vbd,vif) @param target: write device information to @type target: None or a dictionary @rtype: string @return: Assigned UUID of the device. 
""" if target == None: target = self if dev_type not in XendDevices.valid_devices(): raise XendConfigError("XendConfig: %s not a valid device type" % dev_type) if cfg_sxp == None and cfg_xenapi == None: raise XendConfigError("XendConfig: device_add requires some " "config.") #if cfg_sxp: # log.debug("XendConfig.device_add: %s" % str(cfg_sxp)) #if cfg_xenapi: # log.debug("XendConfig.device_add: %s" % str(cfg_xenapi)) if cfg_sxp: if sxp.child0(cfg_sxp) == 'device': config = sxp.child0(cfg_sxp) else: config = cfg_sxp dev_type = sxp.name(config) dev_info = {} if dev_type == 'pci': pci_devs_uuid = sxp.child_value(config, 'uuid', uuid.createString()) pci_dict = pci_convert_sxp_to_dict(config) pci_devs = pci_dict['devs'] # create XenAPI DPCI objects. self.create_dpci_from_sxp(pci_devs) target['devices'][pci_devs_uuid] = (dev_type, {'devs': pci_devs, 'uuid': pci_devs_uuid}) log.debug("XendConfig: reading device: %s" % pci_devs) return pci_devs_uuid if dev_type == 'vscsi': vscsi_devs_uuid = sxp.child_value(config, 'uuid', uuid.createString()) vscsi_dict = self.vscsi_convert_sxp_to_dict(config) vscsi_devs = vscsi_dict['devs'] vscsi_mode = vscsi_dict['feature-host'] vscsi_be = vscsi_dict.get('backend', None) # create XenAPI DSCSI_HBA objects. dscsi_HBA_record = { 'VM': self['uuid'], 'virtual_host': int(vscsi_devs[0]['v-dev'].split(':')[0]), 'assignment_mode': vscsi_mode and 'HOST' or 'LUN' } XendDSCSI_HBA(vscsi_devs_uuid, dscsi_HBA_record) # create XenAPI DSCSI objects. 
for vscsi_dev in vscsi_devs: dscsi_uuid = vscsi_dev.get('uuid') pscsi_uuid = XendPSCSI.get_by_HCTL(vscsi_dev['p-dev']) if pscsi_uuid is None: continue dscsi_record = { 'VM': self['uuid'], 'PSCSI': pscsi_uuid, 'HBA': vscsi_devs_uuid, 'virtual_HCTL': vscsi_dev.get('v-dev') } XendDSCSI(dscsi_uuid, dscsi_record) vscsi_info = { 'devs': vscsi_devs, 'feature-host': vscsi_mode, 'uuid': vscsi_devs_uuid } if vscsi_be is not None: vscsi_info['backend'] = vscsi_be target['devices'][vscsi_devs_uuid] = (dev_type, vscsi_info) log.debug("XendConfig: reading device: %s,%s" % \ (vscsi_devs, vscsi_mode)) return vscsi_devs_uuid if dev_type == 'vusb': vusb_devs_uuid = sxp.child_value(config, 'uuid', uuid.createString()) vusb_dict = self.vusb_convert_sxp_to_dict(config) vusb_dict['uuid'] = vusb_devs_uuid target['devices'][vusb_devs_uuid] = (dev_type, vusb_dict) return vusb_devs_uuid for opt_val in config[1:]: try: opt, val = opt_val dev_info[opt] = val except (TypeError, ValueError): # unpack error pass if dev_type == 'vbd': if dev_info.get('dev', '').startswith('ioemu:'): dev_info['driver'] = 'ioemu' else: dev_info['driver'] = 'paravirtualised' if dev_type == 'tap' or dev_type == 'tap2': tap_disk_type = dev_info['uname'].split(':')[1] # tapdisk uname may be 'tap:<driver>' or 'tap:tapdisk:<driver>' if tap_disk_type == 'tapdisk': tap_disk_type = dev_info['uname'].split(':')[2] if tap_disk_type not in blktap_disk_types: raise XendConfigError("tap:%s not a valid disk type" % tap_disk_type) if dev_type == 'vif': if not dev_info.get('mac'): dev_info['mac'] = randomMAC() ret_uuid = self.device_duplicate_check(dev_type, dev_info, target, config) if ret_uuid != None: return ret_uuid if dev_type == 'vif': if dev_info.get('policy') and dev_info.get('label'): dev_info['security_label'] = "%s:%s:%s" % \ (xsconstants.ACM_POLICY_ID, dev_info['policy'],dev_info['label']) # create uuid if it doesn't exist dev_uuid = dev_info.get('uuid', None) if not dev_uuid: dev_uuid = uuid.createString() 
dev_info['uuid'] = dev_uuid # store dev references by uuid for certain device types target['devices'][dev_uuid] = (dev_type, dev_info) if dev_type in ('vif', 'vbd', 'vtpm'): param = '%s_refs' % dev_type if param not in target: target[param] = [] if dev_uuid not in target[param]: if dev_type == 'vbd': if 'bootable' not in dev_info: # Compat hack -- mark first disk bootable dev_info['bootable'] = int(not target[param]) else: # ensure type of bootable is int (on xend restart # it's of type str) dev_info['bootable'] = int(dev_info['bootable']) target[param].append(dev_uuid) elif dev_type == 'tap' or dev_type == 'tap2': if 'vbd_refs' not in target: target['vbd_refs'] = [] if dev_uuid not in target['vbd_refs']: if 'bootable' not in dev_info: # Compat hack -- mark first disk bootable dev_info['bootable'] = int(not target['vbd_refs']) else: # ensure type of bootable is int (on xend restart it's # of type str) dev_info['bootable'] = int(dev_info['bootable']) target['vbd_refs'].append(dev_uuid) elif dev_type == 'vfb': # Populate other config with aux data that is associated # with vfb other_config = {} for key in XENAPI_CONSOLE_OTHER_CFG: if key in dev_info: other_config[key] = dev_info[key] target['devices'][dev_uuid][1]['other_config'] = other_config if 'console_refs' not in target: target['console_refs'] = [] # Treat VFB devices as console devices so they are found # through Xen API if dev_uuid not in target['console_refs']: target['console_refs'].append(dev_uuid) # Cope with old-format save files which say under vfb # (type vfb) rather than (vfb 1) try: vfb_type = dev_info['type'] except KeyError: vfb_type = None log.debug("iwj dev_type=%s vfb type %s" % (dev_type, `vfb_type`)) if vfb_type == 'vnc' or vfb_type == 'sdl': dev_info[vfb_type] = 1 del dev_info['type'] log.debug("iwj dev_type=%s vfb setting dev_info['%s']" % (dev_type, vfb_type)) # Create serial backends now, the location value is bogus, but does not matter i=0 chardev=0 if dev_info.get('serial') is not None : 
chardev = chardev + 1 if dev_info.get('monitor') is not None : chardev = chardev + 1 if chardev > 0 : chardev = chardev + 1 while i < chardev : cfg = self.console_add('vt100', str(i)) c_uuid = uuid.createString() target['devices'][c_uuid] = ('console', cfg) target['console_refs'].append(c_uuid) i = i + 1 elif dev_type == 'console': if 'console_refs' not in target: target['console_refs'] = [] if dev_uuid not in target['console_refs']: target['console_refs'].append(dev_uuid) log.debug("XendConfig: reading device: %s" % scrub_password(dev_info)) return dev_uuid if cfg_xenapi: dev_info = {} dev_uuid = '' if dev_type == 'vif': dev_info['mac'] = cfg_xenapi.get('MAC') if not dev_info['mac']: dev_info['mac'] = randomMAC() # vifname is the name on the guest, not dom0 # TODO: we don't have the ability to find that out or # change it from dom0 #if cfg_xenapi.get('device'): # don't add if blank # dev_info['vifname'] = cfg_xenapi.get('device') if cfg_xenapi.get('type'): dev_info['type'] = cfg_xenapi.get('type') if cfg_xenapi.get('name'): dev_info['name'] = cfg_xenapi.get('name') if cfg_xenapi.get('network'): network = XendAPIStore.get( cfg_xenapi.get('network'), 'network') dev_info['bridge'] = network.get_name_label() if cfg_xenapi.get('security_label'): dev_info['security_label'] = \ cfg_xenapi.get('security_label') dev_uuid = cfg_xenapi.get('uuid', None) if not dev_uuid: dev_uuid = uuid.createString() dev_info['uuid'] = dev_uuid target['devices'][dev_uuid] = (dev_type, dev_info) target['vif_refs'].append(dev_uuid) elif dev_type in ('vbd', 'tap', 'tap2'): dev_info['type'] = cfg_xenapi.get('type', 'Disk') if dev_info['type'] == 'CD': old_vbd_type = 'cdrom' else: old_vbd_type = 'disk' dev_info['uname'] = cfg_xenapi.get('image', '') dev_info['dev'] = '%s:%s' % (cfg_xenapi.get('device'), old_vbd_type) dev_info['bootable'] = int(cfg_xenapi.get('bootable', 0)) dev_info['driver'] = cfg_xenapi.get('driver', '') dev_info['VDI'] = cfg_xenapi.get('VDI', '') if cfg_xenapi.get('mode') == 
'RW': dev_info['mode'] = 'w' else: dev_info['mode'] = 'r' dev_uuid = cfg_xenapi.get('uuid', None) if not dev_uuid: dev_uuid = uuid.createString() dev_info['uuid'] = dev_uuid target['devices'][dev_uuid] = (dev_type, dev_info) target['vbd_refs'].append(dev_uuid) elif dev_type == 'vtpm': if cfg_xenapi.get('type'): dev_info['type'] = cfg_xenapi.get('type') dev_uuid = cfg_xenapi.get('uuid', None) if not dev_uuid: dev_uuid = uuid.createString() dev_info['uuid'] = dev_uuid dev_info['other_config'] = cfg_xenapi.get('other_config', {}) target['devices'][dev_uuid] = (dev_type, dev_info) target['vtpm_refs'].append(dev_uuid) elif dev_type == 'console': dev_uuid = cfg_xenapi.get('uuid', None) if not dev_uuid: dev_uuid = uuid.createString() dev_info['uuid'] = dev_uuid dev_info['protocol'] = cfg_xenapi.get('protocol', 'rfb') console_other_config = cfg_xenapi.get('other_config', {}) dev_info['other_config'] = console_other_config if dev_info['protocol'] == 'rfb': # collapse other config into devinfo for things # such as vncpasswd, vncunused, etc. 
dev_info.update(console_other_config) dev_info['vnc'] = console_other_config.get('vnc', '0') dev_info['sdl'] = console_other_config.get('sdl', '0') target['devices'][dev_uuid] = ('vfb', dev_info) target['console_refs'].append(dev_uuid) # if console is rfb, set device_model ensuring qemu # is invoked for pvfb services if 'device_model' not in target['platform']: target['platform']['device_model'] = \ auxbin.pathTo("qemu-dm") # Finally, if we are a pvfb, we need to make a vkbd # as well that is not really exposed to Xen API vkbd_uuid = uuid.createString() target['devices'][vkbd_uuid] = ('vkbd', {}) elif dev_info['protocol'] == 'vt100': # if someone tries to create a VT100 console # via the Xen API, we'll have to ignore it # because we create one automatically in # XendDomainInfo._update_consoles raise XendConfigError('Creating vt100 consoles via ' 'Xen API is unsupported') return dev_uuid # no valid device to add return '' def phantom_device_add(self, dev_type, cfg_xenapi = None, target = None): """Add a phantom tap device configuration in XenAPI struct format. 
""" if target == None: target = self if dev_type not in XendDevices.valid_devices() and \ dev_type not in XendDevices.pseudo_devices(): raise XendConfigError("XendConfig: %s not a valid device type" % dev_type) if cfg_xenapi == None: raise XendConfigError("XendConfig: device_add requires some " "config.") if cfg_xenapi: log.debug("XendConfig.phantom_device_add: %s" % str(cfg_xenapi)) if cfg_xenapi: dev_info = {} if dev_type in ('vbd', 'tap'): if dev_type == 'vbd': dev_info['uname'] = cfg_xenapi.get('image', '') dev_info['dev'] = '%s:disk' % cfg_xenapi.get('device') elif dev_type == 'tap': if cfg_xenapi.get('image').find('tap:') == -1: dev_info['uname'] = 'tap:qcow:%s' % cfg_xenapi.get('image') dev_info['dev'] = '/dev/%s' % cfg_xenapi.get('device') dev_info['uname'] = cfg_xenapi.get('image') dev_info['mode'] = cfg_xenapi.get('mode') dev_info['backend'] = '0' dev_uuid = cfg_xenapi.get('uuid', uuid.createString()) dev_info['uuid'] = dev_uuid self['devices'][dev_uuid] = (dev_type, dev_info) self['vbd_refs'].append(dev_uuid) return dev_uuid return '' def vscsi_convert_sxp_to_dict(self, dev_sxp): """Convert vscsi device sxp to dict @param dev_sxp: device configuration @type dev_sxp: SXP object (parsed config) @return: dev_config @rtype: dictionary """ # Parsing the device SXP's. 
In most cases, the SXP looks # like this: # # [device, [vif, [mac, xx:xx:xx:xx:xx:xx], [ip 1.3.4.5]]] # # However, for SCSI devices it looks like this: # # [device, # [vscsi, # [feature-host, 0], # [backend, 0], # [dev, # [devid, 0], [p-devname, sdb], [p-dev, 1:0:0:1], # [v-dev, 0:0:0:0], [state, 1] # ], # [dev, # [devid, 0], [p-devname, sdc], [p-dev, 1:0:0:2], # [v-dev, 0:0:0:1], [satet, 1] # ] # ], # [vscsi, # [feature-host, 1], # [backend, 0], # [dev, # [devid, 1], [p-devname, sdg], [p-dev, 2:0:0:0], # [v-dev, 1:0:0:0], [state, 1] # ], # [dev, # [devid, 1], [p-devname, sdh], [p-dev, 2:0:0:1], # [v-dev, 1:0:0:1], [satet, 1] # ] # ] # ] # # It seems the reasoning for this difference is because # vscsiif.py needs all the SCSI device configurations with # same host number at the same time when creating the devices. # For SCSI device hotplug support, the SXP of SCSI devices is # extendend like this: # # [device, # [vscsi, # [feature-host, 0], # [backend, 0], # [dev, # [devid, 0], [p-devname, sdd], [p-dev, 1:0:0:3], # [v-dev, 0:0:0:2], [state, 1] # ] # ] # ] # # state xenbusState['Initialising'] indicates that the device is # being attached, while state xenbusState['Closing'] indicates # that the device is being detached. # # The Dict looks like this: # # { devs: [ {devid: 0, p-devname: sdd, p-dev: 1:0:0:3, # v-dev: 0:0:0:2, state: 1} ], # feature-host: 1 , backend: 0 } dev_config = {} vscsi_devs = [] for vscsi_dev in sxp.children(dev_sxp, 'dev'): vscsi_dev_info = {} for opt_val in vscsi_dev[1:]: try: opt, val = opt_val vscsi_dev_info[opt] = val except TypeError: pass # append uuid for each vscsi device. 
vscsi_uuid = vscsi_dev_info.get('uuid', uuid.createString()) vscsi_dev_info['uuid'] = vscsi_uuid vscsi_devs.append(vscsi_dev_info) dev_config['devs'] = vscsi_devs vscsi_mode = sxp.children(dev_sxp, 'feature-host')[0] dev_config['feature-host'] = vscsi_mode[1] try: vscsi_be = sxp.children(dev_sxp, 'backend')[0] dev_config['backend'] = vscsi_be[1] except IndexError: pass return dev_config def vusb_convert_sxp_to_dict(self, dev_sxp): """Convert vusb device sxp to dict @param dev_sxp: device configuration @type dev_sxp: SXP object (parsed config) @return: dev_config @rtype: dictionary """ # Parsing USB devices SXP. # # USB device's SXP looks like this: # # [device, # [vusb, # [usb-ver, 2], # [num-ports, 8], # [port, # [1, 1-1], # [2, 1-2], # [3, ''], # [4, ''], # [5, ''], # [6, ''], # [7, 6-2.1], # [8, ''] # ] # ], # [vusb, # [usb-ver, 1], # [num-ports, 2], # [port, # [1, 4-1], # [2, 4-2] # ] # ] # ] # # The dict looks like this # # { usb-ver: 2, # num-ports: 8, # port-1: 1-1, # port-2: 1-2, # port-3: "", # port-4: "", # port-5: "", # port-6: "", # port-7: "", # port-8: "" } dev_config = {} dev_config['usb-ver'] = sxp.child(dev_sxp, 'usb-ver')[1] dev_config['num-ports'] = sxp.child(dev_sxp, 'num-ports')[1] ports = sxp.child(dev_sxp, 'port') for port in ports[1:]: try: # When ['port' ['1','']] is saved into sxp file, it will become (port (1 )) # If using this sxp file, here variable "port" will be port=1, # we should process it, otherwise, it will report error. 
if len(port) == 1: num = port[0] bus = "" else: num, bus = port dev_config['port-%i' % int(num)] = str(bus) except TypeError: pass return dev_config def console_add(self, protocol, location, other_config = {}): dev_uuid = uuid.createString() if protocol == 'vt100': dev_info = { 'uuid': dev_uuid, 'protocol': protocol, 'location': location, 'other_config': other_config, } if 'devices' not in self: self['devices'] = {} self['devices'][dev_uuid] = ('console', dev_info) self['console_refs'].append(dev_uuid) return dev_info return {} def console_update(self, console_uuid, key, value): for dev_uuid, (dev_type, dev_info) in self['devices'].items(): if dev_uuid == console_uuid: dev_info[key] = value # collapse other_config into dev_info for things # such as vncpasswd, vncunused, etc. if key == 'other_config': for k in XENAPI_CONSOLE_OTHER_CFG: if k in dev_info and k not in value: del dev_info[k] dev_info.update(value) break def console_get_all(self, protocol): if protocol == 'vt100': consoles = [dinfo for dtype, dinfo in self['devices'].values() if dtype == 'console'] return [c for c in consoles if c.get('protocol') == protocol] elif protocol == 'rfb': vfbs = [dinfo for dtype, dinfo in self['devices'].values() if dtype == 'vfb'] # move all non-console key values to other_config before # returning console config valid_keys = ['uuid', 'location'] for vfb in vfbs: other_config = {} for key, val in vfb.items(): if key not in valid_keys: other_config[key] = vfb[key] del vfb[key] vfb['other_config'] = other_config vfb['protocol'] = 'rfb' return vfbs else: return [] def device_update(self, dev_uuid, cfg_sxp = [], cfg_xenapi = {}): """Update an existing device with the new configuration. 
        @rtype: boolean
        @return: Returns True if succesfully found and updated a device conf
        """
        if dev_uuid in self['devices'] and cfg_sxp:
            if sxp.child0(cfg_sxp) == 'device':
                config = sxp.child0(cfg_sxp)
            else:
                config = cfg_sxp

            dev_type, dev_info = self['devices'][dev_uuid]

            if dev_type == 'pci': # Special case for pci
                pci_dict = pci_convert_sxp_to_dict(config)
                pci_devs = pci_dict['devs']

                # destroy existing XenAPI DPCI objects
                for dpci_uuid in XendDPCI.get_by_VM(self['uuid']):
                    XendAPIStore.deregister(dpci_uuid, "DPCI")

                # create XenAPI DPCI objects.
                self.create_dpci_from_sxp(pci_devs)

                self['devices'][dev_uuid] = (dev_type,
                                             {'devs': pci_devs,
                                              'uuid': dev_uuid})
                return True

            if dev_type == 'vscsi': # Special case for vscsi
                vscsi_dict = self.vscsi_convert_sxp_to_dict(config)
                vscsi_devs = vscsi_dict['devs']
                vscsi_mode = vscsi_dict['feature-host']
                vscsi_be = vscsi_dict.get('backend', None)

                # destroy existing XenAPI DSCSI objects
                vscsi_devid = int(dev_info['devs'][0]['devid'])
                for dscsi_uuid in XendDSCSI.get_by_VM(self['uuid']):
                    dscsi_inst = XendAPIStore.get(dscsi_uuid, 'DSCSI')
                    if vscsi_devid == dscsi_inst.get_virtual_host():
                        XendAPIStore.deregister(dscsi_uuid, "DSCSI")

                # destroy existing XenAPI DSCSI_HBA objects
                if not vscsi_devs:
                    XendAPIStore.deregister(dev_uuid, 'DSCSI_HBA')

                # create XenAPI DSCSI objects.
                for vscsi_dev in vscsi_devs:
                    dscsi_uuid = vscsi_dev.get('uuid')
                    pscsi_uuid = XendPSCSI.get_by_HCTL(vscsi_dev['p-dev'])
                    if pscsi_uuid is None:
                        continue
                    dscsi_record = {
                        'VM': self['uuid'],
                        'PSCSI': pscsi_uuid,
                        'HBA': dev_uuid,
                        'virtual_HCTL': vscsi_dev.get('v-dev')
                    }
                    XendDSCSI(dscsi_uuid, dscsi_record)

                vscsi_info = {
                    'devs': vscsi_devs,
                    'feature-host': vscsi_mode,
                    'uuid': dev_uuid
                }
                if vscsi_be is not None:
                    vscsi_info['backend'] = vscsi_be
                self['devices'][dev_uuid] = (dev_type, vscsi_info)
                return True

            # Generic path: merge [opt, val] children of the sxp into the
            # existing device info.
            for opt_val in config[1:]:
                try:
                    opt, val = opt_val
                    dev_info[opt] = val
                except (TypeError, ValueError):
                    pass # no value for this config option

            self['devices'][dev_uuid] = (dev_type, dev_info)
            return True

        elif dev_uuid in self['devices'] and cfg_xenapi:
            dev_type, dev_info = self['devices'][dev_uuid]
            for key, val in cfg_xenapi.items():
                dev_info[key] = val
            self['devices'][dev_uuid] = (dev_type, dev_info)
            return True

        return False


    def device_sxpr(self, dev_uuid = None, dev_type = None, dev_info = None,
                    target = None):
        """Get Device SXPR by either giving the device UUID or (type, config).
@rtype: list of lists @return: device config sxpr """ sxpr = [] if target == None: target = self if dev_uuid != None and dev_uuid in target['devices']: dev_type, dev_info = target['devices'][dev_uuid] if dev_type == None or dev_info == None: raise XendConfigError("Required either UUID or device type and " "configuration dictionary.") sxpr.append(dev_type) if dev_type in ('console', 'vfb'): config = [(opt, val) for opt, val in dev_info.items() if opt != 'other_config'] else: config = [(opt, val) for opt, val in dev_info.items()] sxpr += config return sxpr def ordered_device_refs(self, target = None): result = [] if target == None: target = self # vkbd devices *must* be before vfb devices, otherwise # there is a race condition when setting up devices # where the daemon spawned for the vfb may write stuff # into xenstore vkbd backend, before DevController has # setup permissions on the vkbd backend path. This race # results in domain creation failing with 'device already # connected' messages result.extend([u for u in target['devices'].keys() if target['devices'][u][0] == 'vkbd']) result.extend(target.get('console_refs', []) + target.get('vbd_refs', []) + target.get('vif_refs', []) + target.get('vtpm_refs', [])) result.extend([u for u in target['devices'].keys() if u not in result]) return result def all_devices_sxpr(self, target = None): """Returns the SXPR for all devices in the current configuration.""" sxprs = [] if target == None: target = self if 'devices' not in target: return sxprs ordered_refs = self.ordered_device_refs(target = target) for dev_uuid in ordered_refs: dev_type, dev_info = target['devices'][dev_uuid] if dev_type == 'pci' or dev_type == 'vscsi': # special case for pci devices if dev_type == 'pci': sxpr = ['pci', ['uuid', dev_info['uuid']]] elif dev_type == 'vscsi': sxpr = ['vscsi', ['uuid', dev_info['uuid']], ['feature-host', dev_info['feature-host']]] if dev_info.has_key('backend'): sxpr.append(['backend', dev_info['backend']]) for pci_dev_info 
in dev_info['devs']: sxpr.append(dev_dict_to_sxp(pci_dev_info)) sxprs.append((dev_type, sxpr)) elif dev_type == 'vusb': sxpr = ['vusb', ['uuid', dev_info['uuid']], ['usb-ver', dev_info['usb-ver']], ['num-ports', dev_info['num-ports']]] port_sxpr = ['port'] for i in range(1, int(dev_info['num-ports']) + 1): if dev_info.has_key('port-%i' % i): port_sxpr.append([i, str(dev_info['port-%i' % i])]) else: port_sxpr.append([i, ""]) sxpr.append(port_sxpr) sxprs.append((dev_type, sxpr)) else: sxpr = self.device_sxpr(dev_type = dev_type, dev_info = dev_info, target = target) sxprs.append((dev_type, sxpr)) return sxprs def image_sxpr(self): """Returns a backwards compatible image SXP expression that is used in xenstore's /vm/<uuid>/image value and xm list.""" image = [self.image_type()] if self.has_key('PV_kernel'): image.append(['kernel', self['PV_kernel']]) if self.has_key('PV_ramdisk') and self['PV_ramdisk']: image.append(['ramdisk', self['PV_ramdisk']]) if self.has_key('PV_args') and self['PV_args']: image.append(['args', self['PV_args']]) if self.has_key('superpages'): image.append(['superpages', self['superpages']]) for key in XENAPI_PLATFORM_CFG_TYPES.keys(): if key in self['platform']: image.append([key, self['platform'][key]]) if 'notes' in self: image.append(self.notes_sxp(self['notes'])) return image def update_with_image_sxp(self, image_sxp, bootloader = False): # Convert Legacy "image" config to Xen API PV_* # configuration log.debug("update_with_image_sxp(%s)" % scrub_password(image_sxp)) # user-specified args must come last: previous releases did this and # some domU kernels rely upon the ordering. 
kernel_args = sxp.child_value(image_sxp, 'args', '') # attempt to extract extra arguments from SXP config arg_ip = sxp.child_value(image_sxp, 'ip') if arg_ip and not re.search(r'ip=[^ ]+', kernel_args): kernel_args = 'ip=%s ' % arg_ip + kernel_args arg_root = sxp.child_value(image_sxp, 'root') if arg_root and not re.search(r'root=', kernel_args): kernel_args = 'root=%s ' % arg_root + kernel_args if bootloader: self['_temp_using_bootloader'] = '1' self['_temp_kernel'] = sxp.child_value(image_sxp, 'kernel','') self['_temp_ramdisk'] = sxp.child_value(image_sxp, 'ramdisk','') self['_temp_args'] = kernel_args self['use_tmp_kernel'] = True self['use_tmp_ramdisk'] = True else: self['PV_kernel'] = sxp.child_value(image_sxp, 'kernel','') self['PV_ramdisk'] = sxp.child_value(image_sxp, 'ramdisk','') self['PV_args'] = kernel_args self['use_tmp_kernel'] = False self['use_tmp_ramdisk'] = False val = sxp.child_value(image_sxp, 'superpages') if val is not None: self['superpages'] = val val = sxp.child_value(image_sxp, 'memory_sharing') if val is not None: self['memory_sharing'] = val for key in XENAPI_PLATFORM_CFG_TYPES.keys(): val = sxp.child_value(image_sxp, key, None) if val is not None and val != '': self['platform'][key] = val notes = sxp.children(image_sxp, 'notes') if notes: self['notes'] = self.notes_from_sxp(notes[0]) self._hvm_boot_params_from_sxp(image_sxp) def set_notes(self, notes): 'Add parsed elfnotes to image' self['notes'] = notes def get_notes(self): try: return self['notes'] or {} except KeyError: return {} def notes_from_sxp(self, nsxp): notes = {} for note in sxp.children(nsxp): notes[note[0]] = note[1] return notes def notes_sxp(self, notes): nsxp = ['notes'] for k, v in notes.iteritems(): nsxp.append([k, str(v)]) return nsxp def _hvm_boot_params_from_sxp(self, image_sxp): boot = sxp.child_value(image_sxp, 'boot', None) if boot is not None: self['HVM_boot_policy'] = 'BIOS order' self['HVM_boot_params'] = { 'order' : boot } def is_hvm(self): return 
self['HVM_boot_policy'] != '' def is_stubdom(self): return (self['PV_kernel'].find('ioemu') >= 0) def target(self): return self['target'] def image_type(self): stored_type = self['platform'].get('image_type') return stored_type or (self.is_hvm() and 'hvm' or 'linux') def is_hap(self): return self['platform'].get('hap', 0) def is_pv_and_has_pci(self): for dev_type, dev_info in self.all_devices_sxpr(): if dev_type != 'pci': continue return not self.is_hvm() return False def update_platform_pci(self): pci = [] for dev_type, dev_info in self.all_devices_sxpr(): if dev_type != 'pci': continue for dev in sxp.children(dev_info, 'dev'): domain = sxp.child_value(dev, 'domain') bus = sxp.child_value(dev, 'bus') slot = sxp.child_value(dev, 'slot') func = sxp.child_value(dev, 'func') vdevfn = sxp.child_value(dev, 'vdevfn') opts = pci_opts_list_from_sxp(dev) pci.append([domain, bus, slot, func, vdevfn, opts]) self['platform']['pci'] = pci def handle_fileuris(self): for arg in [('PV_kernel', 'use_tmp_kernel'), ('PV_ramdisk', 'use_tmp_ramdisk')]: if arg[0] in self and self[arg[0]]!='': self[arg[0]], self[arg[1]] \ = xen.util.fileuri.schemes.decode(self[arg[0]]) log.debug("fileuri '%s' = '%s'" % (arg[0], self[arg[0]][:100])) ```
[ { "content": "Repeat the full code snippet:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# h5features documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 2 16:02:53 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\...
[ { "content": "Repeat the full code snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# h5features documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 2 16:02:53 2015.\n#\n# This file is execfile()d with the current directory set to its\n# con...
```python #!/usr/bin/env python # -*- coding: utf-8 -*- # # h5features documentation build configuration file, created by # sphinx-quickstart on Mon Nov 2 16:02:53 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # mocking for ReadTheDoc from mock import Mock as MagicMock class Mock(MagicMock): @classmethod def __getattr__(cls, name): return Mock() MOCK_MODULES = ['scipy', 'numpy', 'h5py', 'scipy.sparse', 'scipy.io'] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = 'h5features' author = 'Thomas Schatz, Mathieu Bernard, Roland Thiolliere' copyright = '2015 - 2019, ' + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. _VERSION = open('../VERSION').read().strip() # The short X.Y version. version = '.'.join(_VERSION.split('.')[:2]) # The full version, including alpha/beta/rc tags. release = _VERSION # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'h5featuresdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. 
# 'preamble': '', # Latex figure (float) alignment # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'h5features.tex', 'h5features Documentation', 'Thomas Schatz, Mathieu Bernard, Roland Thiolliere', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'h5features', 'h5features Documentation', [author], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'h5features', 'h5features Documentation', author, 'h5features', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False ```
[ { "content": "Output the full code verbatim (no extra comments):\n```python\n#!/usr/bin/env python\n\"\"\"A module with utilities for dealing with context managers.\"\"\"\n\nfrom typing import ContextManager\nfrom typing import Generic\nfrom typing import Sequence\nfrom typing import TypeVar\n\n_T = TypeVar(\"_...
[ { "content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n#!/usr/bin/env python\n\"\"\"A module with utilities for dealing with context managers.\"\"\"\n\nfrom typing import ContextManager\nfrom typing import Generic\nfrom typing import Sequence\nfrom typing import TypeVar\n\n...
```python #!/usr/bin/env python """A module with utilities for dealing with context managers.""" from typing import ContextManager from typing import Generic from typing import Sequence from typing import TypeVar _T = TypeVar("_T") class NullContext(ContextManager[_T], Generic[_T]): """A context manager that always yields provided values. This class is useful for providing context-like semantics for values that are not context managers themselves because they do not need to manage any resources but are used as context managers. This is a backport of the `contextlib.nullcontext` class introduced in Python 3.7. Once support for old versions of Python is dropped, all uses of this class should be replaced with the one provided by the standard library. """ def __init__(self, value: _T) -> None: self._value = value def __enter__(self) -> _T: return self._value def __exit__(self, exc_type, exc_value, traceback): del exc_type, exc_value, traceback # Unused. class MultiContext(ContextManager[Sequence[_T]], Generic[_T]): """A context managers that sequences multiple context managers. This is similar to the monadic `sequence` operator: it takes a list of context managers, enters each of them and yields list of values that the managers yield. One possible scenario where this class comes in handy is when one needs to open multiple files. """ # TODO: `Collection` would be a better type here, but it is only # available in Python 3.6+. Once support for Python 2 is dropped, this can be # generalized. def __init__(self, managers: Sequence[ContextManager[_T]]) -> None: self._managers = managers def __enter__(self) -> Sequence[_T]: values = [] for manager in self._managers: value = manager.__enter__() values.append(value) return values def __exit__(self, exc_type, exc_value, traceback): for manager in self._managers: manager.__exit__(exc_type, exc_value, traceback) ```
[ { "content": "Recreate the entire code block with identical formatting:\n```python\ndef succ(x):\n return x + 1\n\nsuccessor = succ\ndel succ\nprint(successor(10))\n\n\n# functions inside functions\ndef f1():\n def g1():\n print(\"hi, it's me 'g'\")\n print(\"Thanks for calling me\")\n\n ...
[ { "content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\ndef succ(x):\n return x + 1\n\nsuccessor = succ\ndel succ\nprint(successor(10))\n\n\n# functions inside functions\ndef f1():\n def g1():\n print(\"hi, it's me 'g'\")\n print(\"Thanks for calli...
```python def succ(x): return x + 1 successor = succ del succ print(successor(10)) # functions inside functions def f1(): def g1(): print("hi, it's me 'g'") print("Thanks for calling me") print("This is the function 'f'") print("I am calling 'g' now") g1() def temperature(t): def celsius2fahrenheit(x): return 9 * x / 5 + 32 result = "It's " + str(celsius2fahrenheit(t)) + " degrees!" return result # functions as parameters def g(): print("Hi, it's me 'g'") print("Thanks for calling me") def f(func): print("Hi, it's me 'f'") print("I will call 'func' now") func() print("func's real name is " + func.__name__) # functions return functions def f2(x): def g2(y): return y + x + 3 return g2 # a simple decorator def out_decorator(func): def function_wrapper(x): print("Before calling " + func.__name__) func(x) print("After calling " + func.__name__) return function_wrapper # below execution sequence: foo = out_decorator(foo), then foo(x) @out_decorator def foo(x): print("Hi, foo has been called with " + str(x)) foo(42) # means functions_wrapper(42) ```
[ { "content": "Here is a code file:\n```python\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms ...
[ { "content": "Here is a code file:\n<|memory_start|>```python\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it ...
```python # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Configuration storage and config-related utilities. This borrows a lot of ideas from configparser, but also has some things that are fundamentally different. This is why nothing inherits from configparser, but we borrow some methods and classes from there where it makes sense. """ import os import sys import os.path import functools import configparser import contextlib import collections import collections.abc from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl, QSettings from qutebrowser.config import configdata, configexc, textwrapper from qutebrowser.config.parsers import ini, keyconf from qutebrowser.commands import cmdexc, cmdutils from qutebrowser.utils import (message, objreg, utils, standarddir, log, qtutils, error, usertypes) from qutebrowser.utils.usertypes import Completion UNSET = object() class change_filter: # pylint: disable=invalid-name """Decorator to filter calls based on a config section/option matching. This could also be a function, but as a class (with a "wrong" name) it's much cleaner to implement. Attributes: _sectname: The section to be filtered. _optname: The option to be filtered. 
_function: Whether a function rather than a method is decorated. """ def __init__(self, sectname, optname=None, function=False): """Save decorator arguments. Gets called on parse-time with the decorator arguments. Args: sectname: The section to be filtered. optname: The option to be filtered. function: Whether a function rather than a method is decorated. """ if sectname not in configdata.DATA: raise configexc.NoSectionError(sectname) if optname is not None and optname not in configdata.DATA[sectname]: raise configexc.NoOptionError(optname, sectname) self._sectname = sectname self._optname = optname self._function = function def __call__(self, func): """Filter calls to the decorated function. Gets called when a function should be decorated. Adds a filter which returns if we're not interested in the change-event and calls the wrapped function if we are. We assume the function passed doesn't take any parameters. Args: func: The function to be decorated. Return: The decorated function. """ if self._function: @pyqtSlot(str, str) @functools.wraps(func) def wrapper(sectname=None, optname=None): # pylint: disable=missing-docstring if sectname is None and optname is None: # Called directly, not from a config change event. return func() elif sectname != self._sectname: return elif self._optname is not None and optname != self._optname: return else: return func() else: @pyqtSlot(str, str) @functools.wraps(func) def wrapper(wrapper_self, sectname=None, optname=None): # pylint: disable=missing-docstring if sectname is None and optname is None: # Called directly, not from a config change event. return func(wrapper_self) elif sectname != self._sectname: return elif self._optname is not None and optname != self._optname: return else: return func(wrapper_self) return wrapper def get(*args, **kwargs): """Convenience method to call get(...) 
of the config instance.""" return objreg.get('config').get(*args, **kwargs) def section(sect): """Get a config section from the global config.""" return objreg.get('config')[sect] def _init_main_config(parent=None): """Initialize the main config. Args: parent: The parent to pass to ConfigManager. """ args = objreg.get('args') try: config_obj = ConfigManager(standarddir.config(), 'qutebrowser.conf', args.relaxed_config, parent=parent) except (configexc.Error, configparser.Error, UnicodeDecodeError) as e: log.init.exception(e) errstr = "Error while reading config:" try: errstr += "\n\n{} -> {}:".format( e.section, e.option) # pylint: disable=no-member except AttributeError: pass errstr += "\n" error.handle_fatal_exc(e, args, "Error while reading config!", pre_text=errstr) # We didn't really initialize much so far, so we just quit hard. sys.exit(usertypes.Exit.err_config) else: objreg.register('config', config_obj) if standarddir.config() is not None: filename = os.path.join(standarddir.config(), 'qutebrowser.conf') save_manager = objreg.get('save-manager') save_manager.add_saveable( 'config', config_obj.save, config_obj.changed, config_opt=('general', 'auto-save-config'), filename=filename) for sect in config_obj.sections.values(): for opt in sect.values.values(): if opt.values['conf'] is None: # Option added to built-in defaults but not in user's # config yet save_manager.save('config', explicit=True, force=True) return def _init_key_config(parent): """Initialize the key config. Args: parent: The parent to use for the KeyConfigParser. 
""" args = objreg.get('args') try: key_config = keyconf.KeyConfigParser(standarddir.config(), 'keys.conf', args.relaxed_config, parent=parent) except (keyconf.KeyConfigError, UnicodeDecodeError) as e: log.init.exception(e) errstr = "Error while reading key config:\n" if e.lineno is not None: errstr += "In line {}: ".format(e.lineno) error.handle_fatal_exc(e, args, "Error while reading key config!", pre_text=errstr) # We didn't really initialize much so far, so we just quit hard. sys.exit(usertypes.Exit.err_key_config) else: objreg.register('key-config', key_config) if standarddir.config() is not None: save_manager = objreg.get('save-manager') filename = os.path.join(standarddir.config(), 'keys.conf') save_manager.add_saveable( 'key-config', key_config.save, key_config.config_dirty, config_opt=('general', 'auto-save-config'), filename=filename, dirty=key_config.is_dirty) def _init_misc(): """Initialize misc. config-related files.""" save_manager = objreg.get('save-manager') state_config = ini.ReadWriteConfigParser(standarddir.data(), 'state') for sect in ('general', 'geometry'): try: state_config.add_section(sect) except configparser.DuplicateSectionError: pass # See commit a98060e020a4ba83b663813a4b9404edb47f28ad. state_config['general'].pop('fooled', None) objreg.register('state-config', state_config) save_manager.add_saveable('state-config', state_config.save) # We need to import this here because lineparser needs config. from qutebrowser.misc import lineparser command_history = lineparser.LimitLineParser( standarddir.data(), 'cmd-history', limit=('completion', 'cmd-history-max-items'), parent=objreg.get('config')) objreg.register('command-history', command_history) save_manager.add_saveable('command-history', command_history.save, command_history.changed) # Set the QSettings path to something like # ~/.config/qutebrowser/qsettings/qutebrowser/qutebrowser.conf so it # doesn't overwrite our config. 
# # This fixes one of the corruption issues here: # https://github.com/The-Compiler/qutebrowser/issues/515 if standarddir.config() is None: path = os.devnull else: path = os.path.join(standarddir.config(), 'qsettings') for fmt in (QSettings.NativeFormat, QSettings.IniFormat): QSettings.setPath(fmt, QSettings.UserScope, path) def init(parent=None): """Initialize the config. Args: parent: The parent to pass to QObjects which get initialized. """ _init_main_config(parent) _init_key_config(parent) _init_misc() def _get_value_transformer(mapping): """Get a function which transforms a value for CHANGED_OPTIONS. Args: mapping: A dictionary mapping old values to new values. Value is not transformed if the supplied value doesn't match the old value. Return: A function which takes a value and transforms it. """ def transformer(val): try: return mapping[val] except KeyError: return val return transformer def _transform_position(val): """Transformer for position values.""" mapping = { 'north': 'top', 'south': 'bottom', 'west': 'left', 'east': 'right', } try: return mapping[val] except KeyError: return val class ConfigManager(QObject): """Configuration manager for qutebrowser. Class attributes: KEY_ESCAPE: Chars which need escaping when they occur as first char in a line. ESCAPE_CHAR: The char to be used for escaping RENAMED_SECTIONS: A mapping of renamed sections, {'oldname': 'newname'} RENAMED_OPTIONS: A mapping of renamed options, {('section', 'oldname'): 'newname'} CHANGED_OPTIONS: A mapping of arbitrarily changed options, {('section', 'option'): callable}. The callable takes the old value and returns the new one. DELETED_OPTIONS: A (section, option) list of deleted options. Attributes: sections: The configuration data as an OrderedDict. _fname: The filename to be opened. _configdir: The dictionary to read the config from and save it in. _interpolation: An configparser.Interpolation object _proxies: configparser.SectionProxy objects for sections. 
_initialized: Whether the ConfigManager is fully initialized yet. Signals: changed: Emitted when a config option changed. style_changed: When style caches need to be invalidated. Args: the changed section and option. """ KEY_ESCAPE = r'\#[' ESCAPE_CHAR = '\\' RENAMED_SECTIONS = { 'permissions': 'content' } RENAMED_OPTIONS = { ('colors', 'tab.fg.odd'): 'tabs.fg.odd', ('colors', 'tab.fg.even'): 'tabs.fg.even', ('colors', 'tab.fg.selected'): 'tabs.fg.selected.odd', ('colors', 'tabs.fg.selected'): 'tabs.fg.selected.odd', ('colors', 'tab.bg.odd'): 'tabs.bg.odd', ('colors', 'tab.bg.even'): 'tabs.bg.even', ('colors', 'tab.bg.selected'): 'tabs.bg.selected.odd', ('colors', 'tabs.bg.selected'): 'tabs.bg.selected.odd', ('colors', 'tab.bg.bar'): 'tabs.bg.bar', ('colors', 'tab.indicator.start'): 'tabs.indicator.start', ('colors', 'tab.indicator.stop'): 'tabs.indicator.stop', ('colors', 'tab.indicator.error'): 'tabs.indicator.error', ('colors', 'tab.indicator.system'): 'tabs.indicator.system', ('tabs', 'auto-hide'): 'hide-auto', ('completion', 'history-length'): 'cmd-history-max-items', ('colors', 'downloads.fg'): 'downloads.fg.start', } DELETED_OPTIONS = [ ('colors', 'tab.separator'), ('colors', 'tabs.separator'), ('colors', 'completion.item.bg'), ('tabs', 'indicator-space'), ('tabs', 'hide-auto'), ('tabs', 'hide-always'), ] CHANGED_OPTIONS = { ('content', 'cookies-accept'): _get_value_transformer({'default': 'no-3rdparty'}), ('tabs', 'position'): _transform_position, ('ui', 'downloads-position'): _transform_position, ('ui', 'remove-finished-downloads'): _get_value_transformer({'false': '-1', 'true': '1000'}) } changed = pyqtSignal(str, str) style_changed = pyqtSignal(str, str) def __init__(self, configdir, fname, relaxed=False, parent=None): super().__init__(parent) self._initialized = False self.sections = configdata.data() self._interpolation = configparser.ExtendedInterpolation() self._proxies = {} for sectname in self.sections: self._proxies[sectname] = SectionProxy(self, 
sectname) self._fname = fname if configdir is None: self._configdir = None self._initialized = True else: self._configdir = configdir parser = ini.ReadConfigParser(configdir, fname) self._from_cp(parser, relaxed) self._initialized = True self._validate_all() def __getitem__(self, key): """Get a section from the config.""" return self._proxies[key] def __repr__(self): return utils.get_repr(self, fname=self._fname) def __str__(self): """Get the whole config as a string.""" lines = configdata.FIRST_COMMENT.strip('\n').splitlines() for sectname, sect in self.sections.items(): lines.append('\n[{}]'.format(sectname)) lines += self._str_section_desc(sectname) lines += self._str_option_desc(sectname, sect) lines += self._str_items(sect) return '\n'.join(lines) + '\n' def _str_section_desc(self, sectname): """Get the section description string for sectname.""" wrapper = textwrapper.TextWrapper() lines = [] seclines = configdata.SECTION_DESC[sectname].splitlines() for secline in seclines: if 'http://' in secline or 'https://' in secline: lines.append('# ' + secline) else: lines += wrapper.wrap(secline) return lines def _str_option_desc(self, sectname, sect): """Get the option description strings for sect/sectname.""" wrapper = textwrapper.TextWrapper(initial_indent='#' + ' ' * 5, subsequent_indent='#' + ' ' * 5) lines = [] if not getattr(sect, 'descriptions', None): return lines for optname, option in sect.items(): lines.append('#') if option.typ.special: typestr = '' else: typestr = ' ({})'.format(option.typ.__class__.__name__) lines.append("# {}{}:".format(optname, typestr)) try: desc = self.sections[sectname].descriptions[optname] except KeyError: log.config.exception("No description for {}.{}!".format( sectname, optname)) continue for descline in desc.splitlines(): lines += wrapper.wrap(descline) valid_values = option.typ.valid_values if valid_values is not None: if valid_values.descriptions: for val in valid_values: desc = valid_values.descriptions[val] lines += 
wrapper.wrap(" {}: {}".format(val, desc)) else: lines += wrapper.wrap("Valid values: {}".format(', '.join( valid_values))) lines += wrapper.wrap("Default: {}".format( option.values['default'])) return lines def _str_items(self, sect): """Get the option items as string for sect.""" lines = [] for optname, option in sect.items(): value = option.value(startlayer='conf') for c in self.KEY_ESCAPE: if optname.startswith(c): optname = optname.replace(c, self.ESCAPE_CHAR + c, 1) # configparser can't handle = in keys :( optname = optname.replace('=', '<eq>') keyval = '{} = {}'.format(optname, value) lines.append(keyval) return lines def _get_real_sectname(self, cp, sectname): """Get an old or new section name based on a configparser. This checks if sectname is in cp, and if not, migrates it if needed and tries again. Args: cp: The configparser to check. sectname: The new section name. Returns: The section name in the configparser as a string, or None if the configparser doesn't contain the section. """ reverse_renamed_sections = {v: k for k, v in self.RENAMED_SECTIONS.items()} if sectname in reverse_renamed_sections: old_sectname = reverse_renamed_sections[sectname] else: old_sectname = sectname if old_sectname in cp: return old_sectname elif sectname in cp: return sectname else: return None def _from_cp(self, cp, relaxed=False): """Read the config from a configparser instance. Args: cp: The configparser instance to read the values from. relaxed: Whether to ignore inexistent sections/options. """ for sectname in cp: if sectname in self.RENAMED_SECTIONS: sectname = self.RENAMED_SECTIONS[sectname] if sectname is not 'DEFAULT' and sectname not in self.sections: if not relaxed: raise configexc.NoSectionError(sectname) for sectname in self.sections: self._from_cp_section(sectname, cp, relaxed) def _from_cp_section(self, sectname, cp, relaxed): """Read a single section from a configparser instance. Args: sectname: The name of the section to read. 
cp: The configparser instance to read the values from. relaxed: Whether to ignore inexistent options. """ real_sectname = self._get_real_sectname(cp, sectname) if real_sectname is None: return for k, v in cp[real_sectname].items(): if k.startswith(self.ESCAPE_CHAR): k = k[1:] if (sectname, k) in self.DELETED_OPTIONS: return if (sectname, k) in self.RENAMED_OPTIONS: k = self.RENAMED_OPTIONS[sectname, k] if (sectname, k) in self.CHANGED_OPTIONS: func = self.CHANGED_OPTIONS[(sectname, k)] v = func(v) try: self.set('conf', sectname, k, v, validate=False) except configexc.NoOptionError: if relaxed: pass else: raise def _validate_all(self): """Validate all values set in self._from_cp.""" for sectname, sect in self.sections.items(): mapping = {key: val.value() for key, val in sect.values.items()} for optname, opt in sect.items(): interpolated = self._interpolation.before_get( self, sectname, optname, opt.value(), mapping) try: opt.typ.validate(interpolated) except configexc.ValidationError as e: e.section = sectname e.option = optname raise def _changed(self, sectname, optname): """Notify other objects the config has changed.""" log.config.debug("Config option changed: {} -> {}".format( sectname, optname)) if sectname in ('colors', 'fonts'): self.style_changed.emit(sectname, optname) self.changed.emit(sectname, optname) def _after_set(self, changed_sect, changed_opt): """Clean up caches and emit signals after an option has been set.""" self.get.cache_clear() self._changed(changed_sect, changed_opt) # Options in the same section and ${optname} interpolation. for optname, option in self.sections[changed_sect].items(): if '${' + changed_opt + '}' in option.value(): self._changed(changed_sect, optname) # Options in any section and ${sectname:optname} interpolation. 
for sectname, sect in self.sections.items(): for optname, option in sect.items(): if ('${' + changed_sect + ':' + changed_opt + '}' in option.value()): self._changed(sectname, optname) def items(self, sectname, raw=True): """Get a list of (optname, value) tuples for a section. Implemented for configparser interpolation compatibility Args: sectname: The name of the section to get. raw: Whether to get raw values. Note this parameter only exists for ConfigParser compatibility and raw=False is not supported. """ items = [] if not raw: raise ValueError("items() with raw=True is not implemented!") for optname, option in self.sections[sectname].items(): items.append((optname, option.value())) return items def has_option(self, sectname, optname): """Check if option exists in section. Args: sectname: The section name. optname: The option name Return: True if the option and section exist, False otherwise. """ if sectname not in self.sections: return False return optname in self.sections[sectname] def remove_option(self, sectname, optname): """Remove an option. Args: sectname: The section where to remove an option. optname: The option name to remove. Return: True if the option existed, False otherwise. """ try: sectdict = self.sections[sectname] except KeyError: raise configexc.NoSectionError(sectname) optname = self.optionxform(optname) existed = optname in sectdict if existed: del sectdict[optname] self.get.cache_clear() return existed @functools.lru_cache() def get(self, sectname, optname, raw=False, transformed=True, fallback=UNSET): """Get the value from a section/option. We don't support the vars argument from configparser.get as it's not hashable. Args: sectname: The section to get the option from. optname: The option name raw: Whether to get the uninterpolated, untransformed value. transformed: Whether the value should be transformed. Return: The value of the option. 
""" if not self._initialized: raise Exception("get got called before initialization was " "complete!") try: sect = self.sections[sectname] except KeyError: if fallback is not UNSET: return fallback raise configexc.NoSectionError(sectname) try: val = sect[optname] except KeyError: if fallback is not UNSET: return fallback raise configexc.NoOptionError(optname, sectname) if raw: return val.value() mapping = {key: val.value() for key, val in sect.values.items()} newval = self._interpolation.before_get(self, sectname, optname, val.value(), mapping) if transformed: newval = val.typ.transform(newval) return newval @contextlib.contextmanager def _handle_config_error(self): """Catch errors in set_command and raise CommandError.""" try: yield except (configexc.NoOptionError, configexc.NoSectionError, configexc.ValidationError) as e: raise cmdexc.CommandError("set: {}".format(e)) except (configexc.Error, configparser.Error) as e: raise cmdexc.CommandError("set: {} - {}".format( e.__class__.__name__, e)) @cmdutils.register(name='set', instance='config', win_id='win_id', completion=[Completion.section, Completion.option, Completion.value]) def set_command(self, win_id, section_=None, option=None, value=None, temp=False, print_=False): """Set an option. If the option name ends with '?', the value of the option is shown instead. If the option name ends with '!' and it is a boolean value, toggle it. // Wrapper for self.set() to output exceptions in the status bar. Args: section_: The section where the option is in. option: The name of the option. value: The value to set. temp: Set value temporarily. print_: Print the value after setting. 
""" if section_ is not None and option is None: raise cmdexc.CommandError( "set: Either both section and option have to be given, or " "neither!") if section_ is None and option is None: tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) tabbed_browser.openurl(QUrl('qute:settings'), newtab=False) return if option.endswith('?') and option != '?': option = option[:-1] print_ = True else: with self._handle_config_error(): if option.endswith('!') and option != '!' and value is None: option = option[:-1] val = self.get(section_, option) layer = 'temp' if temp else 'conf' if isinstance(val, bool): self.set(layer, section_, option, str(not val)) else: raise cmdexc.CommandError( "set: Attempted inversion of non-boolean value.") elif value is not None: layer = 'temp' if temp else 'conf' self.set(layer, section_, option, value) else: raise cmdexc.CommandError("set: The following arguments " "are required: value") if print_: with self._handle_config_error(): val = self.get(section_, option, transformed=False) message.info(win_id, "{} {} = {}".format( section_, option, val), immediately=True) def set(self, layer, sectname, optname, value, validate=True): """Set an option. Args: layer: A layer name as string (conf/temp/default). sectname: The name of the section to change. optname: The name of the option to change. value: The new value. validate: Whether to validate the value immediately. 
""" try: value = self._interpolation.before_set(self, sectname, optname, value) except ValueError as e: raise configexc.InterpolationSyntaxError(optname, sectname, str(e)) try: sect = self.sections[sectname] except KeyError: raise configexc.NoSectionError(sectname) mapping = {key: val.value() for key, val in sect.values.items()} if validate: interpolated = self._interpolation.before_get( self, sectname, optname, value, mapping) else: interpolated = None try: sect.setv(layer, optname, value, interpolated) except KeyError: raise configexc.NoOptionError(optname, sectname) else: if self._initialized: self._after_set(sectname, optname) def save(self): """Save the config file.""" if self._configdir is None: return configfile = os.path.join(self._configdir, self._fname) log.destroy.debug("Saving config to {}".format(configfile)) with qtutils.savefile_open(configfile) as f: f.write(str(self)) def dump_userconfig(self): """Get the part of the config which was changed by the user. Return: The changed config part as string. """ lines = [] for sectname, sect in self.sections.items(): changed = sect.dump_userconfig() if changed: lines.append('[{}]'.format(sectname)) lines += ['{} = {}'.format(k, v) for k, v in changed] if not lines: lines = ['<Default configuration>'] return '\n'.join(lines) def optionxform(self, val): """Implemented to be compatible with ConfigParser interpolation.""" return val class SectionProxy(collections.abc.MutableMapping): """A proxy for a single section from a config. Attributes: _conf: The Config object. _name: The section name. """ def __init__(self, conf, name): """Create a view on a section. Args: conf: The Config object. name: The section name. 
""" self.conf = conf self.name = name def __repr__(self): return utils.get_repr(self, name=self.name) def __getitem__(self, key): if not self.conf.has_option(self.name, key): raise KeyError(key) return self.conf.get(self.name, key) def __setitem__(self, key, value): return self.conf.set('conf', self.name, key, value) def __delitem__(self, key): if not (self.conf.has_option(self.name, key) and self.conf.remove_option(self.name, key)): raise KeyError(key) def __contains__(self, key): return self.conf.has_option(self.name, key) def __len__(self): return len(self._options()) def __iter__(self): return self._options().__iter__() def _options(self): """Get the option keys from this section.""" return self.conf.sections[self.name].keys() def get(self, optname, *, raw=False): # pylint: disable=arguments-differ """Get a value from this section. We deliberately don't support the default argument here, but have a raw argument instead. Args: optname: The option name to get. raw: Whether to get a raw value or not. """ return self.conf.get(self.name, optname, raw=raw) ```
[ { "content": "Repeat the following code:\n```python\n\"\"\"\nThis is a module to read in an HDF5 file with CCFs.\nUse this to determine the best parameters, and plot the best CCF for each star/date\n\"\"\"\nfrom collections import defaultdict\nimport logging\n\nimport h5py\nimport numpy as np\nimport pandas as ...
[ { "content": "Repeat the following code:\n<|memory_start|>```python\n\"\"\"\nThis is a module to read in an HDF5 file with CCFs.\nUse this to determine the best parameters, and plot the best CCF for each star/date\n\"\"\"\nfrom collections import defaultdict\nimport logging\n\nimport h5py\nimport numpy as np\ni...
```python """ This is a module to read in an HDF5 file with CCFs. Use this to determine the best parameters, and plot the best CCF for each star/date """ from collections import defaultdict import logging import h5py import numpy as np import pandas as pd from scipy.interpolate import InterpolatedUnivariateSpline as spline class CCF_Interface(object): def __init__(self, filename, vel=np.arange(-900, 900, 1)): self.hdf5 = h5py.File(filename, 'r') self.velocities = vel self._df = None def __getitem__(self, path): return self.hdf5[path] def list_stars(self, print2screen=False): """ List the stars available in the HDF5 file, and the dates available for each :return: A list of the stars """ if print2screen: for star in sorted(self.hdf5.keys()): print(star) for date in sorted(self.hdf5[star].keys()): print('\t{}'.format(date)) return sorted(self.hdf5.keys()) def list_dates(self, star, print2screen=False): """ List the dates available for the given star :param star: The name of the star :return: A list of dates the star was observed """ if print2screen: for date in sorted(self.hdf5[star].keys()): print(date) return sorted(self.hdf5[star].keys()) def load_cache(self, addmode='simple'): """ Read in the whole HDF5 file. This will take a while and take a few Gb of memory, but will speed things up considerably :keyword addmode: The way the individual CCFs were added. Options are: - 'simple' - 'ml' - 'all' (saves all addmodes) """ self._df = self._compile_data(addmode=addmode) def _compile_data(self, starname=None, date=None, addmode='simple', read_ccf=True): """ Private function. This reads in all the datasets for the given star and date :param starname: the name of the star. Must be in self.hdf5 :param date: The date to search. Must be in self.hdf5[star] :keyword addmode: The way the individual CCFs were added. 
Options are: - 'simple' - 'ml' - 'all' (saves all addmodes) :return: a pandas DataFrame with the columns: - star - date - temperature - log(g) - [Fe/H] - vsini - addmode - rv (at maximum CCF value) - CCF height (maximum) """ if starname is None: df_list = [] star_list = self.list_stars() for star in star_list: date_list = self.list_dates(star) for date in date_list: logging.debug('Reading in metadata for star {}, date {}'.format(star, date)) df_list.append(self._compile_data(star, date, addmode=addmode, read_ccf=read_ccf)) return pd.concat(df_list, ignore_index=True) elif starname is not None and date is None: df_list = [] date_list = self.list_dates(starname) for date in date_list: logging.debug('Reading in metadata for date {}'.format(date)) df_list.append(self._compile_data(starname, date, addmode=addmode, read_ccf=read_ccf)) return pd.concat(df_list, ignore_index=True) else: if self._df is not None: return self._df.loc[(self._df['Star'] == starname) & (self._df['Date'] == date)].copy() #print('Stars: ', self.list_stars()) datasets = self.hdf5[starname][date].keys() data = defaultdict(list) for ds_name, ds in self.hdf5[starname][date].iteritems(): # in datasets: #ds = self.hdf5[starname][date][ds_name] try: am = ds.attrs['addmode'] if addmode == 'all' or addmode == am: data['T'].append(ds.attrs['T']) data['logg'].append(ds.attrs['logg']) data['[Fe/H]'].append(ds.attrs['[Fe/H]']) data['vsini'].append(ds.attrs['vsini']) data['addmode'].append(am) data['name'].append(ds.name) try: data['ccf_max'].append(ds.attrs['ccf_max']) data['vel_max'].append(ds.attrs['vel_max']) except KeyError: vel, corr = ds.value idx = np.argmax(corr) data['ccf_max'].append(corr[idx]) data['vel_max'].append(vel[idx]) if read_ccf: v = ds.value vel, corr = v[0], v[1] sorter = np.argsort(vel) fcn = spline(vel[sorter], corr[sorter]) data['ccf'].append(fcn(self.velocities)) except: raise IOError('Something weird happened with dataset {}!'.format(ds.name)) data['Star'] = [starname] * 
len(data['T']) data['Date'] = [date] * len(data['T']) df = pd.DataFrame(data=data) return df def get_temperature_run(self, starname=None, date=None, df=None): """ Return the maximum ccf height for each temperature. Either starname AND date, or df must be given :param starname: The name of the star :param date: The date of the observation :param df: Input dataframe, such as from _compile_data. Overrides starname and date, if given :return: a pandas DataFrame with all the best parameters for each temperature """ # Get the dataframe if it isn't given if df is None: if starname is None or date is None: raise ValueError('Must give either starname or date to get_temperature_run!') df = self._compile_data(starname, date) # Find the maximum CCF for each set of parameters fcn = lambda row: (np.max(row), self.velocities[np.argmax(row)]) vals = df['ccf'].map(fcn) df['ccf_max'] = vals.map(lambda l: l[0]) df['rv'] = vals.map(lambda l: l[1]) # Find the best parameters for each temperature d = defaultdict(list) temperatures = pd.unique(df['T']) for T in temperatures: good = df.loc[df['T'] == T] best = good.loc[good.ccf_max == good.ccf_max.max()] d['vsini'].append(best['vsini'].item()) d['logg'].append(best['logg'].item()) d['[Fe/H]'].append(best['[Fe/H]'].item()) d['rv'].append(best['rv'].item()) d['ccf_value'].append(best.ccf_max.item()) d['T'].append(T) d['metal'].append(best['[Fe/H]'].item()) return pd.DataFrame(data=d) def get_ccf(self, params, df=None): """ Get the ccf with the given parameters. A dataframe can be given to speed things up :param params: All the parameters necessary to define a single ccf. This should be a python dictionary with the keys: - 'starname': The name of the star. Try self.list_stars() for the options. - 'date': The UT date of the observations. Try self.list_dates() for the options. 
- 'T': temperature of the model - 'logg': the log(g) of the model - 'vsini': the vsini by which the model was broadened before correlation - '[Fe/H]': the metallicity of the model - 'addmode': The way the order CCFs were added to make a total one. Can be: - 'simple' - 'ml' - 'weighted' - 'dc' :param df: a pandas DataFrame such as outputted by _compile_data :return: a pandas DataFrame with columns of velocity and CCF power """ if df is None: try: df = self._compile_data(params['starname'], params['date']) except KeyError: raise KeyError('Must give get_ccf params with starname and date keywords, if df is not given!') Tvals = df['T'].unique() T = Tvals[np.argmin(abs(Tvals - params['T']))] good = df.loc[(df['T'] == T) & (df.logg == params['logg']) & (df.vsini == params['vsini']) \ & (df['[Fe/H]'] == params['[Fe/H]']) & (df.addmode == params['addmode'])] return pd.DataFrame(data={'velocity': self.velocities, 'CCF': good['ccf'].item()}) ```
[ { "content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2021\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This program is free softw...
[ { "content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2021\n# Leandro Toledo de Souza <devs@python-telegram-bot.org>\n#\n# This progr...
```python #!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2021 # Leandro Toledo de Souza <devs@python-telegram-bot.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains an object that represents a Telegram PreCheckoutQuery.""" from typing import TYPE_CHECKING, Any, Optional from telegram import OrderInfo, TelegramObject, User from telegram.utils.helpers import DEFAULT_NONE from telegram.utils.types import JSONDict, ODVInput if TYPE_CHECKING: from telegram import Bot class PreCheckoutQuery(TelegramObject): """This object contains information about an incoming pre-checkout query. Objects of this class are comparable in terms of equality. Two objects of this class are considered equal, if their :attr:`id` is equal. Note: In Python ``from`` is a reserved word, use ``from_user`` instead. Args: id (:obj:`str`): Unique query identifier. from_user (:class:`telegram.User`): User who sent the query. currency (:obj:`str`): Three-letter ISO 4217 currency code. total_amount (:obj:`int`): Total price in the smallest units of the currency (integer, not float/double). For example, for a price of US$ 1.45 pass ``amount = 145``. 
See the :obj:`exp` parameter in `currencies.json <https://core.telegram.org/bots/payments/currencies.json>`_, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). invoice_payload (:obj:`str`): Bot specified invoice payload. shipping_option_id (:obj:`str`, optional): Identifier of the shipping option chosen by the user. order_info (:class:`telegram.OrderInfo`, optional): Order info provided by the user. bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods. **kwargs (:obj:`dict`): Arbitrary keyword arguments. Attributes: id (:obj:`str`): Unique query identifier. from_user (:class:`telegram.User`): User who sent the query. currency (:obj:`str`): Three-letter ISO 4217 currency code. total_amount (:obj:`int`): Total price in the smallest units of the currency. invoice_payload (:obj:`str`): Bot specified invoice payload. shipping_option_id (:obj:`str`): Optional. Identifier of the shipping option chosen by the user. order_info (:class:`telegram.OrderInfo`): Optional. Order info provided by the user. bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods. 
""" __slots__ = ( 'bot', 'invoice_payload', 'shipping_option_id', 'currency', 'order_info', 'total_amount', 'id', 'from_user', '_id_attrs', ) def __init__( self, id: str, # pylint: disable=W0622 from_user: User, currency: str, total_amount: int, invoice_payload: str, shipping_option_id: str = None, order_info: OrderInfo = None, bot: 'Bot' = None, **_kwargs: Any, ): self.id = id # pylint: disable=C0103 self.from_user = from_user self.currency = currency self.total_amount = total_amount self.invoice_payload = invoice_payload self.shipping_option_id = shipping_option_id self.order_info = order_info self.bot = bot self._id_attrs = (self.id,) @classmethod def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['PreCheckoutQuery']: """See :meth:`telegram.TelegramObject.de_json`.""" data = cls._parse_data(data) if not data: return None data['from_user'] = User.de_json(data.pop('from'), bot) data['order_info'] = OrderInfo.de_json(data.get('order_info'), bot) return cls(bot=bot, **data) def answer( # pylint: disable=C0103 self, ok: bool, error_message: str = None, timeout: ODVInput[float] = DEFAULT_NONE, api_kwargs: JSONDict = None, ) -> bool: """Shortcut for:: bot.answer_pre_checkout_query(update.pre_checkout_query.id, *args, **kwargs) For the documentation of the arguments, please see :meth:`telegram.Bot.answer_pre_checkout_query`. """ return self.bot.answer_pre_checkout_query( pre_checkout_query_id=self.id, ok=ok, error_message=error_message, timeout=timeout, api_kwargs=api_kwargs, ) ```
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# Copyright (C) 2004-2016 by\n# Aric Hagberg <hagberg@lanl.gov>\n# Dan Schult <dschult@colgate.edu>\n# Pieter Swart <swart@lanl.gov>\n# All rights reserved.\n# BSD license.\n\"\"\"\nGenerators for som...
[ { "content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# Copyright (C) 2004-2016 by\n# Aric Hagberg <hagberg@lanl.gov>\n# Dan Schult <dschult@colgate.edu>\n# Pieter Swart <swart@lanl.gov>\n# All rights reserved.\n# BSD license.\n\"\"\"\nGe...
```python # Copyright (C) 2004-2016 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. """ Generators for some classic graphs. The typical graph generator is called as follows: >>> G=nx.complete_graph(100) returning the complete graph on n nodes labeled 0,..,99 as a simple graph. Except for empty_graph, all the generators in this module return a Graph class (i.e. a simple, undirected graph). """ from __future__ import division import itertools import networkx as nx from networkx.algorithms.bipartite.generators import complete_bipartite_graph from networkx.utils import accumulate from networkx.utils import flatten from networkx.utils import is_list_of_ints __author__ ="""Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)""" __all__ = [ 'balanced_tree', 'barbell_graph', 'complete_graph', 'complete_multipartite_graph', 'circular_ladder_graph', 'circulant_graph', 'cycle_graph', 'dorogovtsev_goltsev_mendes_graph', 'empty_graph', 'full_rary_tree', 'grid_graph', 'grid_2d_graph', 'hypercube_graph', 'ladder_graph', 'lollipop_graph', 'null_graph', 'path_graph', 'star_graph', 'trivial_graph', 'wheel_graph'] #------------------------------------------------------------------- # Some Classic Graphs #------------------------------------------------------------------- def _tree_edges(n,r): # helper function for trees # yields edges in rooted tree at 0 with n nodes and branching ratio r nodes=iter(range(n)) parents=[next(nodes)] # stack of max length r while parents: source=parents.pop(0) for i in range(r): try: target=next(nodes) parents.append(target) yield source,target except StopIteration: break def full_rary_tree(r, n, create_using=None): """Creates a full r-ary tree of n vertices. Sometimes called a k-ary, n-ary, or m-ary tree. "... 
all non-leaf vertices have exactly r children and all levels are full except for some rightmost position of the bottom level (if a leaf at the bottom level is missing, then so are all of the leaves to its right." [1]_ Parameters ---------- r : int branching factor of the tree n : int Number of nodes in the tree create_using : NetworkX graph type, optional Use specified type to construct graph (default = networkx.Graph) Returns ------- G : networkx Graph An r-ary tree with n nodes References ---------- .. [1] An introduction to data structures and algorithms, James Andrew Storer, Birkhauser Boston 2001, (page 225). """ G=nx.empty_graph(n,create_using) G.add_edges_from(_tree_edges(n,r)) return G def balanced_tree(r, h, create_using=None): """Return the perfectly balanced ``r``-ary tree of height ``h``. Parameters ---------- r : int Branching factor of the tree; each node will have ``r`` children. h : int Height of the tree. create_using : NetworkX graph Use the specified graph as the base for the generated tree; see the documentation of :func:`~networkx.empty_graph` for more information. Returns ------- G : NetworkX graph A balanced ``r``-ary tree of height ``h``. Notes ----- This is the rooted tree where all leaves are at distance ``h`` from the root. The root has degree ``r`` and all other internal nodes have degree ``r + 1``. Node labels are integers, starting from zero. A balanced tree is also known as a *complete ``r``-ary tree*. """ # The number of nodes in the balanced tree is `1 + r + ... + r^h`, # which is computed by using the closed-form formula for a geometric # sum with ratio `r`. In the special case that `r` is 1, the number # of nodes is simply `h + 1` (since the tree is actually a path # graph). if r == 1: n = h + 1 else: # This must be an integer if both `r` and `h` are integers. If # they are not, we force integer division anyway. 
n = (1 - r ** (h + 1)) // (1 - r) return full_rary_tree(r, n, create_using=create_using) def barbell_graph(m1,m2,create_using=None): """Return the Barbell Graph: two complete graphs connected by a path. For m1 > 1 and m2 >= 0. Two identical complete graphs K_{m1} form the left and right bells, and are connected by a path P_{m2}. The 2*m1+m2 nodes are numbered 0,...,m1-1 for the left barbell, m1,...,m1+m2-1 for the path, and m1+m2,...,2*m1+m2-1 for the right barbell. The 3 subgraphs are joined via the edges (m1-1,m1) and (m1+m2-1,m1+m2). If m2=0, this is merely two complete graphs joined together. This graph is an extremal example in David Aldous and Jim Fill's etext on Random Walks on Graphs. """ if create_using is not None and create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") if m1<2: raise nx.NetworkXError(\ "Invalid graph description, m1 should be >=2") if m2<0: raise nx.NetworkXError(\ "Invalid graph description, m2 should be >=0") # left barbell G=complete_graph(m1,create_using) G.name="barbell_graph(%d,%d)"%(m1,m2) # connecting path G.add_nodes_from([v for v in range(m1,m1+m2-1)]) if m2>1: G.add_edges_from([(v,v+1) for v in range(m1,m1+m2-1)]) # right barbell G.add_edges_from( (u,v) for u in range(m1+m2,2*m1+m2) for v in range(u+1,2*m1+m2)) # connect it up G.add_edge(m1-1,m1) if m2>0: G.add_edge(m1+m2-1,m1+m2) return G def complete_graph(n,create_using=None): """ Return the complete graph K_n with n nodes. Node labels are the integers 0 to n-1. """ G=empty_graph(n,create_using) G.name="complete_graph(%d)"%(n) if n>1: if G.is_directed(): edges=itertools.permutations(range(n),2) else: edges=itertools.combinations(range(n),2) G.add_edges_from(edges) return G def circular_ladder_graph(n,create_using=None): """Return the circular ladder graph CL_n of length n. CL_n consists of two concentric n-cycles in which each of the n pairs of concentric nodes are joined by an edge. 
Node labels are the integers 0 to n-1 """ G=ladder_graph(n,create_using) G.name="circular_ladder_graph(%d)"%n G.add_edge(0,n-1) G.add_edge(n,2*n-1) return G def circulant_graph(n, offsets, create_using=None): """Generates the circulant graph Ci_n(x_1, x_2, ..., x_m) with n vertices. Returns ------- The graph Ci_n(x_1, ..., x_m) consisting of n vertices 0, ..., n-1 such that the vertex with label i is connected to the vertices labelled (i + x) and (i - x), for all x in x_1 up to x_m, with the indices taken modulo n. Parameters ---------- n : integer The number of vertices the generated graph is to contain. offsets : list of integers A list of vertex offsets, x_1 up to x_m, as described above. create_using : NetworkX graph type, optional Use specified type to construct graph (default = networkx.Graph) Examples -------- Many well-known graph families are subfamilies of the circulant graphs; for example, to generate the cycle graph on n points, we connect every vertex to every other at offset plus or minus one. For n = 10, >>> import networkx >>> G = networkx.generators.classic.circulant_graph(10, [1]) >>> edges = [ ... (0, 9), (0, 1), (1, 2), (2, 3), (3, 4), ... (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)] ... >>> sorted(edges) == sorted(G.edges()) True Similarly, we can generate the complete graph on 5 points with the set of offsets [1, 2]: >>> G = networkx.generators.classic.circulant_graph(5, [1, 2]) >>> edges = [ ... (0, 1), (0, 2), (0, 3), (0, 4), (1, 2), ... (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] ... >>> sorted(edges) == sorted(G.edges()) True """ G = empty_graph(n, create_using) template = 'circulant_graph(%d, [%s])' G.name = template % (n, ', '.join(str(j) for j in offsets)) for i in range(n): for j in offsets: G.add_edge(i, (i - j) % n) G.add_edge(i, (i + j) % n) return G def cycle_graph(n,create_using=None): """Return the cycle graph C_n over n nodes. C_n is the n-path with two end-nodes connected. 
Node labels are the integers 0 to n-1 If create_using is a DiGraph, the direction is in increasing order. """ G=path_graph(n,create_using) G.name="cycle_graph(%d)"%n if n>1: G.add_edge(n-1,0) return G def dorogovtsev_goltsev_mendes_graph(n,create_using=None): """Return the hierarchically constructed Dorogovtsev-Goltsev-Mendes graph. n is the generation. See: arXiv:/cond-mat/0112143 by Dorogovtsev, Goltsev and Mendes. """ if create_using is not None: if create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") if create_using.is_multigraph(): raise nx.NetworkXError("Multigraph not supported") G=empty_graph(0,create_using) G.name="Dorogovtsev-Goltsev-Mendes Graph" G.add_edge(0,1) if n==0: return G new_node = 2 # next node to be added for i in range(1,n+1): #iterate over number of generations. last_generation_edges = list(G.edges()) number_of_edges_in_last_generation = len(last_generation_edges) for j in range(0,number_of_edges_in_last_generation): G.add_edge(new_node,last_generation_edges[j][0]) G.add_edge(new_node,last_generation_edges[j][1]) new_node += 1 return G def empty_graph(n=0,create_using=None): """Return the empty graph with n nodes and zero edges. Node labels are the integers 0 to n-1 For example: >>> G=nx.empty_graph(10) >>> G.number_of_nodes() 10 >>> G.number_of_edges() 0 The variable create_using should point to a "graph"-like object that will be cleaned (nodes and edges will be removed) and refitted as an empty "graph" with n nodes with integer labels. This capability is useful for specifying the class-nature of the resulting empty "graph" (i.e. Graph, DiGraph, MyWeirdGraphClass, etc.). The variable create_using has two main uses: Firstly, the variable create_using can be used to create an empty digraph, network,etc. For example, >>> n=10 >>> G=nx.empty_graph(n,create_using=nx.DiGraph()) will create an empty digraph on n nodes. Secondly, one can pass an existing graph (digraph, pseudograph, etc.) via create_using. 
For example, if G is an existing graph (resp. digraph, pseudograph, etc.), then empty_graph(n,create_using=G) will empty G (i.e. delete all nodes and edges using G.clear() in base) and then add n nodes and zero edges, and return the modified graph (resp. digraph, pseudograph, etc.). See also create_empty_copy(G). """ if create_using is None: # default empty graph is a simple graph G=nx.Graph() else: G=create_using G.clear() G.add_nodes_from(range(n)) G.name="empty_graph(%d)"%n return G def grid_2d_graph(m,n,periodic=False,create_using=None): """ Return the 2d grid graph of mxn nodes, each connected to its nearest neighbors. Optional argument periodic=True will connect boundary nodes via periodic boundary conditions. """ G=empty_graph(0,create_using) G.name="grid_2d_graph" rows=range(m) columns=range(n) G.add_nodes_from( (i,j) for i in rows for j in columns ) G.add_edges_from( ((i,j),(i-1,j)) for i in rows for j in columns if i>0 ) G.add_edges_from( ((i,j),(i,j-1)) for i in rows for j in columns if j>0 ) if G.is_directed(): G.add_edges_from( ((i,j),(i+1,j)) for i in rows for j in columns if i<m-1 ) G.add_edges_from( ((i,j),(i,j+1)) for i in rows for j in columns if j<n-1 ) if periodic: if n>2: G.add_edges_from( ((i,0),(i,n-1)) for i in rows ) if G.is_directed(): G.add_edges_from( ((i,n-1),(i,0)) for i in rows ) if m>2: G.add_edges_from( ((0,j),(m-1,j)) for j in columns ) if G.is_directed(): G.add_edges_from( ((m-1,j),(0,j)) for j in columns ) G.name="periodic_grid_2d_graph(%d,%d)"%(m,n) return G def grid_graph(dim,periodic=False): """ Return the n-dimensional grid graph. The dimension is the length of the list 'dim' and the size in each dimension is the value of the list element. E.g. G=grid_graph(dim=[2,3]) produces a 2x3 grid graph. If periodic=True then join grid edges with periodic boundary conditions. 
""" dlabel="%s"%dim if dim==[]: G=empty_graph(0) G.name="grid_graph(%s)"%dim return G if not is_list_of_ints(dim): raise nx.NetworkXError("dim is not a list of integers") if min(dim)<=0: raise nx.NetworkXError(\ "dim is not a list of strictly positive integers") if periodic: func=cycle_graph else: func=path_graph dim=list(dim) current_dim=dim.pop() G=func(current_dim) while len(dim)>0: current_dim=dim.pop() # order matters: copy before it is cleared during the creation of Gnew Gold=G.copy() Gnew=func(current_dim) # explicit: create_using=None # This is so that we get a new graph of Gnew's class. G=nx.cartesian_product(Gnew,Gold) # graph G is done but has labels of the form (1,(2,(3,1))) # so relabel H=nx.relabel_nodes(G, flatten) H.name="grid_graph(%s)"%dlabel return H def hypercube_graph(n): """Return the n-dimensional hypercube. Node labels are the integers 0 to 2**n - 1. """ dim=n*[2] G=grid_graph(dim) G.name="hypercube_graph_(%d)"%n return G def ladder_graph(n,create_using=None): """Return the Ladder graph of length n. This is two rows of n nodes, with each pair connected by a single edge. Node labels are the integers 0 to 2*n - 1. """ if create_using is not None and create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") G=empty_graph(2*n,create_using) G.name="ladder_graph_(%d)"%n G.add_edges_from([(v,v+1) for v in range(n-1)]) G.add_edges_from([(v,v+1) for v in range(n,2*n-1)]) G.add_edges_from([(v,v+n) for v in range(n)]) return G def lollipop_graph(m,n,create_using=None): """Return the Lollipop Graph; `K_m` connected to `P_n`. This is the Barbell Graph without the right barbell. For m>1 and n>=0, the complete graph K_m is connected to the path P_n. The resulting m+n nodes are labelled 0,...,m-1 for the complete graph and m,...,m+n-1 for the path. The 2 subgraphs are joined via the edge (m-1,m). If n=0, this is merely a complete graph. Node labels are the integers 0 to number_of_nodes - 1. 
(This graph is an extremal example in David Aldous and Jim Fill's etext on Random Walks on Graphs.) """ if create_using is not None and create_using.is_directed(): raise nx.NetworkXError("Directed Graph not supported") if m<2: raise nx.NetworkXError(\ "Invalid graph description, m should be >=2") if n<0: raise nx.NetworkXError(\ "Invalid graph description, n should be >=0") # the ball G=complete_graph(m,create_using) # the stick G.add_nodes_from([v for v in range(m,m+n)]) if n>1: G.add_edges_from([(v,v+1) for v in range(m,m+n-1)]) # connect ball to stick if m>0: G.add_edge(m-1,m) G.name="lollipop_graph(%d,%d)"%(m,n) return G def null_graph(create_using=None): """Return the Null graph with no nodes or edges. See empty_graph for the use of create_using. """ G=empty_graph(0,create_using) G.name="null_graph()" return G def path_graph(n,create_using=None): """Return the Path graph P_n of n nodes linearly connected by n-1 edges. Node labels are the integers 0 to n - 1. If create_using is a DiGraph then the edges are directed in increasing order. """ G=empty_graph(n,create_using) G.name="path_graph(%d)"%n G.add_edges_from([(v,v+1) for v in range(n-1)]) return G def star_graph(n,create_using=None): """ Return the Star graph with n+1 nodes: one center node, connected to n outer nodes. Node labels are the integers 0 to n. """ G=complete_bipartite_graph(1,n,create_using) G.name="star_graph(%d)"%n return G def trivial_graph(create_using=None): """ Return the Trivial graph with one node (with integer label 0) and no edges. """ G=empty_graph(1,create_using) G.name="trivial_graph()" return G def wheel_graph(n,create_using=None): """ Return the wheel graph: a single hub node connected to each node of the (n-1)-node cycle graph. Node labels are the integers 0 to n - 1. 
""" if n == 0: return nx.empty_graph(n, create_using=create_using) G=star_graph(n-1,create_using) G.name="wheel_graph(%d)"%n G.add_edges_from([(v,v+1) for v in range(1,n-1)]) if n>2: G.add_edge(1,n-1) return G def complete_multipartite_graph(*block_sizes): """Returns the complete multipartite graph with the specified block sizes. Parameters ---------- block_sizes : tuple of integers The number of vertices in each block of the multipartite graph. The length of this tuple is the number of blocks. Returns ------- G : NetworkX Graph Returns the complete multipartite graph with the specified block sizes. For each node, the node attribute ``'block'`` is an integer indicating which block contains the node. Examples -------- Creating a complete tripartite graph, with blocks of one, two, and three vertices, respectively. >>> import networkx as nx >>> G = nx.complete_multipartite_graph(1, 2, 3) >>> [G.node[u]['block'] for u in G] [0, 1, 1, 2, 2, 2] >>> list(G.edges(0)) [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)] >>> list(G.edges(2)) [(2, 0), (2, 3), (2, 4), (2, 5)] >>> list(G.edges(4)) [(4, 0), (4, 1), (4, 2)] Notes ----- This function generalizes several other graph generator functions. - If no block sizes are given, this returns the null graph. - If a single block size ``n`` is given, this returns the empty graph on ``n`` nodes. - If two block sizes ``m`` and ``n`` are given, this returns the complete bipartite graph on ``m + n`` nodes. - If block sizes ``1`` and ``n`` are given, this returns the star graph on ``n + 1`` nodes. See also -------- complete_bipartite_graph """ G = nx.empty_graph(sum(block_sizes)) # If block_sizes is (n1, n2, n3, ...), create pairs of the form (0, n1), # (n1, n1 + n2), (n1 + n2, n1 + n2 + n3), etc. extents = zip([0] + list(accumulate(block_sizes)), accumulate(block_sizes)) blocks = [range(start, end) for start, end in extents] for (i, block) in enumerate(blocks): G.add_nodes_from(block, block=i) # Across blocks, all vertices should be adjacent. 
We can use # itertools.combinations() because the complete multipartite graph is an # undirected graph. for block1, block2 in itertools.combinations(blocks, 2): G.add_edges_from(itertools.product(block1, block2)) G.name = 'complete_multiparite_graph{0}'.format(block_sizes) return G ```
[ { "content": "Repeat the code precisely:\n```python\n#!/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\nimport os\nimport sys\nimport time\nimport socket\ntry:\n import urllib.request as urllib\nexcept ImportError:\n import urllib\nimport has...
[ { "content": "Repeat the code precisely:\n<|memory_start|>```python\n#!/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\nimport os\nimport sys\nimport time\nimport socket\ntry:\n import urllib.request as urllib\nexcept ImportError:\n import ur...
```python #!/usr/bin/env python from __future__ import absolute_import, division, print_function, \ unicode_literals import os import sys import time import socket try: import urllib.request as urllib except ImportError: import urllib import hashlib import argparse import logging from zeroconf import ServiceInfo, Zeroconf try: from http.server import BaseHTTPRequestHandler, HTTPServer except ImportError: from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from . import utils __all__ = ["put"] def validate_address(address): """ Validate IP address """ try: socket.inet_aton(address) return address except socket.error: raise argparse.ArgumentTypeError( "%s is not a valid IP address" % address ) class StateHTTPServer(HTTPServer): """ HTTP Server that knows a certain filename and can be set to remember if that file has been transferred using :class:`FileHandler` """ downloaded = False filename = "" basename = "" reporthook = None class FileHandler(BaseHTTPRequestHandler): """ Custom HTTP upload handler that allows one single filename to be requested. """ def do_GET(self): if self.path == urllib.pathname2url( os.path.join('/', self.server.basename) ): utils.logger.info("Peer found. Uploading...") full_path = os.path.join(os.curdir, self.server.filename) with open(full_path, 'rb') as fh: maxsize = os.path.getsize(full_path) self.send_response(200) self.send_header('Content-type', 'application/octet-stream') self.send_header('Content-length', maxsize) self.end_headers() i = 0 while True: data = fh.read(1024 * 8) # chunksize taken from urllib if not data: break self.wfile.write(data) if self.server.reporthook is not None: self.server.reporthook(i, 1024 * 8, maxsize) i += 1 self.server.downloaded = True else: self.send_response(404) self.end_headers() raise RuntimeError("Invalid request received. 
Aborting.") def log_message(self, format, *args): """ Suppress log messages by overloading this function """ return def cli(inargs=None): """ Commandline interface for sending files """ parser = argparse.ArgumentParser() parser.add_argument( '--port', '-p', type=int, nargs='?', help="The port to share the file on" ) parser.add_argument( '--address', '-a', nargs='?', type=validate_address, help="The address to share the file on" ) parser.add_argument( '--interface', '-i', nargs='?', help="The interface to share the file on" ) parser.add_argument( '--verbose', '-v', action='count', default=0, help="Verbose mode. Multiple -v options increase the verbosity" ) parser.add_argument( '--quiet', '-q', action='count', default=0, help="Quiet mode. Hides progess bar" ) parser.add_argument( '--timeout', '-t', type=int, metavar="SECONDS", help="Set timeout after which program aborts transfer" ) parser.add_argument( '--version', '-V', action='version', version='%%(prog)s %s' % utils.__version__ ) parser.add_argument( 'input', help="The file to share on the network" ) args = parser.parse_args(inargs) utils.enable_logger(args.verbose) try: if not os.path.isfile(args.input): raise ValueError( "File %s does not exist" % args.input ) if args.interface and args.address: raise ValueError( "You may only provide one of --address " "or --interface" ) with utils.Progresshook(args.input) as progress: put( args.input, interface=args.interface, address=args.address, port=args.port, reporthook=progress if args.quiet == 0 else None, timeout=args.timeout, ) except Exception as e: if args.verbose: raise utils.logger.error(e.message) sys.exit(1) def put( filename, interface=None, address=None, port=None, reporthook=None, timeout=None, ): """Send a file using the zget protocol. Parameters ---------- filename : string The filename to be transferred interface : string The network interface to use. Optional. address : string The network address to use. Optional. port : int The network port to use. 
Optional. reporthook : callable A hook that will be called during transfer. Handy for watching the transfer. See :code:`urllib.urlretrieve` for callback parameters. Optional. timeout : int Seconds to wait until process is aborted. A running transfer is not aborted even when timeout was hit. Optional. Raises ------- TimeoutException When a timeout occurred. """ if port is None: port = utils.config().getint('DEFAULT', 'port') if interface is None: interface = utils.config().get('DEFAULT', 'interface') if not 0 <= port <= 65535: raise ValueError("Port %d exceeds allowed range" % port) basename = os.path.basename(filename) filehash = hashlib.sha1(basename.encode('utf-8')).hexdigest() if interface is None: interface = utils.default_interface() if address is None: address = utils.ip_addr(interface) server = StateHTTPServer((address, port), FileHandler) server.timeout = timeout server.filename = filename server.basename = basename server.reporthook = reporthook port = server.server_port utils.logger.debug( "Using interface %s" % interface ) utils.logger.debug( "Listening on %s:%d \n" "you may change address using --address and " "port using --port" % (address, port) ) utils.logger.debug( "Broadcasting as %s._zget._http._tcp.local." % filehash ) info = ServiceInfo( "_zget._http._tcp.local.", "%s._zget._http._tcp.local." % filehash, socket.inet_aton(address), port, 0, 0, {'path': None} ) zeroconf = Zeroconf() try: zeroconf.register_service(info) server.handle_request() except KeyboardInterrupt: pass server.socket.close() zeroconf.unregister_service(info) zeroconf.close() if timeout is not None and not server.downloaded: raise utils.TimeoutException() else: utils.logger.info("Done.") if __name__ == '__main__': cli(sys.argv[1:]) ```
[ { "content": "Write the code verbatim:\n```python\n# -*- coding: utf-8 -*-\n\n__author__ = 'meanwhile'\n\nimport ssl\nimport time\nimport socket\nimport sys\nimport logging\n\nimport vkontakte\nimport ProvaderStorage\nimport Constants\nimport FileWriter\nimport ProccessingResponce\nimport daemon\n\n\nclass VkPa...
[ { "content": "Write the code verbatim:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n__author__ = 'meanwhile'\n\nimport ssl\nimport time\nimport socket\nimport sys\nimport logging\n\nimport vkontakte\nimport ProvaderStorage\nimport Constants\nimport FileWriter\nimport ProccessingResponce\nimport daemon...
```python # -*- coding: utf-8 -*- __author__ = 'meanwhile' import ssl import time import socket import sys import logging import vkontakte import ProvaderStorage import Constants import FileWriter import ProccessingResponce import daemon class VkParserDemon(daemon.Daemon): def run(self): #read code for method vk.executin from file codeFromFile = '' with open(Constants.Constants.getFileCodeExecute(), 'r') as f: codeFromFile = f.read() #read access token from file access_tokens = []; with open(Constants.Constants.getFileAccessToken(), 'r') as f: access_tokens = [token.strip() for token in f] isValidToken = False; for acces_token in access_tokens: try: vk = vkontakte.API(token=acces_token) vk.getServerTime() #проверяем соединилось ли isValidToken = True break except vkontakte.VKError, e: logging.error("vkontakte.VKError ") except ssl.SSLError, e: #The handshake operation timed out logging.error("ssl error") time.sleep(1) access_tokens.append(acces_token) if (isValidToken): storage = ProvaderStorage.ProvaderStorage() lastTime = vk.getServerTime() emptyLastTime = 0; while True: try: time.sleep(Constants.Constants.getTimeOutInSec()) codeSending = codeFromFile.replace('%time_replace%', str(lastTime)) json = vk.execute(code=codeSending, timeout=10) logging.debug("vk_json responce ", json) fileName = Constants.Constants.getDirHomeScript() + str(time.strftime("%d-%m-%Y")) + ".vkr" #vk raw file = FileWriter.FileWriterBinary(fileName) process = ProccessingResponce.ProccessingResponce(storage, file) process.jsonParse(json) if json['max_time'] > 0: lastTime = json['max_time'] + 1 else: logging.debug("empty json= ", json) logging.debug("lastTime= ", lastTime) logging.debug("complidet proccessing") except ssl.SSLError, e: logging.error("ssl error") except socket.timeout, e: logging.error("socket.timeout") except vkontakte.VKError, e: logging.error("vkontakte.VKError") except AttributeError, e: logging.error("AttributeError") else: #TODO: send emails tokens no correct 
logging.error("token uncorrect") if __name__ == "__main__": logging.basicConfig(format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s', level=logging.ERROR) daemon = VkParserDemon('/tmp/daemon-example.pid', stdout='/var/log/vk_parser/stdout.log', stderr='/var/log/vk_parser/error.log') if len(sys.argv) == 2: if 'start' == sys.argv[1]: daemon.start() elif 'stop' == sys.argv[1]: daemon.stop() elif 'restart' == sys.argv[1]: daemon.restart() else: print "Unknown command" sys.exit(2) sys.exit(0) else: print "usage: %s start|stop|restart" % sys.argv[0] sys.exit(2) ```
[ { "content": "Repeat the following code:\n```python\n# Copyright 2010 OpenStack Foundation\n# Copyright 2012 University Of Minho\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of ...
[ { "content": "Repeat the following code:\n<|memory_start|>```python\n# Copyright 2010 OpenStack Foundation\n# Copyright 2012 University Of Minho\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\...
```python # Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import deque from collections import OrderedDict import contextlib import copy import datetime import errno import glob import os import random import re import shutil import signal import threading import time import uuid import eventlet from eventlet import greenthread import fixtures from lxml import etree import mock from mox3 import mox from os_brick.initiator import connector import os_vif from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import fileutils from oslo_utils import fixture as utils_fixture from oslo_utils import importutils from oslo_utils import units from oslo_utils import uuidutils from oslo_utils import versionutils import six from six.moves import builtins from six.moves import range from nova.api.metadata import base as instance_metadata from nova.compute import arch from nova.compute import cpumodel from nova.compute import manager from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova.network import model as network_model from nova import 
objects from nova.objects import block_device as block_device_obj from nova.objects import fields from nova.objects import migrate_data as migrate_data_obj from nova.objects import virtual_interface as obj_vif from nova.pci import manager as pci_manager from nova.pci import utils as pci_utils from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network import nova.tests.unit.image.fake from nova.tests.unit import matchers from nova.tests.unit.objects import test_pci_device from nova.tests.unit.objects import test_vcpu_model from nova.tests.unit.virt.libvirt import fake_imagebackend from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt from nova.tests import uuidsentinel as uuids from nova import utils from nova import version from nova.virt import block_device as driver_block_device from nova.virt.disk import api as disk_api from nova.virt import driver from nova.virt import fake from nova.virt import firewall as base_firewall from nova.virt import hardware from nova.virt.image import model as imgmodel from nova.virt import images from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import firewall from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache from nova.virt.libvirt import migration as libvirt_migrate from nova.virt.libvirt.storage import dmcrypt from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import volume as volume_drivers libvirt_driver.libvirt = fakelibvirt host.libvirt = fakelibvirt libvirt_guest.libvirt = fakelibvirt libvirt_migrate.libvirt = fakelibvirt 
CONF = nova.conf.CONF _fake_network_info = fake_network.fake_get_instance_nw_info _fake_NodeDevXml = \ {"pci_0000_04_00_3": """ <device> <name>pci_0000_04_00_3</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igb</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>0</slot> <function>3</function> <product id='0x1521'>I350 Gigabit Network Connection</product> <vendor id='0x8086'>Intel Corporation</vendor> <capability type='virt_functions'> <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/> <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/> <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/> <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/> </capability> </capability> </device>""", "pci_0000_04_10_7": """ <device> <name>pci_0000_04_10_7</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igbvf</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>16</slot> <function>7</function> <product id='0x1520'>I350 Ethernet Controller Virtual Function </product> <vendor id='0x8086'>Intel Corporation</vendor> <capability type='phys_function'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> <capability type='virt_functions'> </capability> </capability> </device>""", "pci_0000_04_11_7": """ <device> <name>pci_0000_04_11_7</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igbvf</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>17</slot> <function>7</function> <product id='0x1520'>I350 Ethernet Controller Virtual Function </product> <vendor id='0x8086'>Intel Corporation</vendor> <numa node='0'/> <capability type='phys_function'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> <capability type='virt_functions'> </capability> </capability> </device>""", "pci_0000_04_00_1": """ <device> <name>pci_0000_04_00_1</name> 
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path> <parent>pci_0000_00_02_0</parent> <driver> <name>mlx5_core</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>0</slot> <function>1</function> <product id='0x1013'>MT27700 Family [ConnectX-4]</product> <vendor id='0x15b3'>Mellanox Technologies</vendor> <iommuGroup number='15'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/> </iommuGroup> <numa node='0'/> <pci-express> <link validity='cap' port='0' speed='8' width='16'/> <link validity='sta' speed='8' width='16'/> </pci-express> </capability> </device>""", # libvirt >= 1.3.0 nodedev-dumpxml "pci_0000_03_00_0": """ <device> <name>pci_0000_03_00_0</name> <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path> <parent>pci_0000_00_02_0</parent> <driver> <name>mlx5_core</name> </driver> <capability type='pci'> <domain>0</domain> <bus>3</bus> <slot>0</slot> <function>0</function> <product id='0x1013'>MT27700 Family [ConnectX-4]</product> <vendor id='0x15b3'>Mellanox Technologies</vendor> <capability type='virt_functions' maxCount='16'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/> </capability> <iommuGroup number='15'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/> </iommuGroup> <numa node='0'/> <pci-express> <link validity='cap' port='0' speed='8' width='16'/> <link validity='sta' speed='8' width='16'/> </pci-express> </capability> </device>""", "pci_0000_03_00_1": """ <device> <name>pci_0000_03_00_1</name> <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path> <parent>pci_0000_00_02_0</parent> <driver> <name>mlx5_core</name> </driver> 
<capability type='pci'> <domain>0</domain> <bus>3</bus> <slot>0</slot> <function>1</function> <product id='0x1013'>MT27700 Family [ConnectX-4]</product> <vendor id='0x15b3'>Mellanox Technologies</vendor> <capability type='virt_functions' maxCount='16'/> <iommuGroup number='15'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/> </iommuGroup> <numa node='0'/> <pci-express> <link validity='cap' port='0' speed='8' width='16'/> <link validity='sta' speed='8' width='16'/> </pci-express> </capability> </device>""", } _fake_cpu_info = { "arch": "test_arch", "model": "test_model", "vendor": "test_vendor", "topology": { "sockets": 1, "cores": 8, "threads": 16 }, "features": ["feature1", "feature2"] } eph_default_ext = utils.get_hash_str(disk_api._DEFAULT_FILE_SYSTEM)[:7] def eph_name(size): return ('ephemeral_%(size)s_%(ext)s' % {'size': size, 'ext': eph_default_ext}) def fake_disk_info_byname(instance, type='qcow2'): """Return instance_disk_info corresponding accurately to the properties of the given Instance object. The info is returned as an OrderedDict of name->disk_info for each disk. :param instance: The instance we're generating fake disk_info for. :param type: libvirt's disk type. 
    :return: disk_info
    :rtype: OrderedDict
    """
    instance_dir = os.path.join(CONF.instances_path, instance.uuid)

    def instance_path(name):
        # Path of a named disk file inside this instance's directory.
        return os.path.join(instance_dir, name)

    disk_info = OrderedDict()

    # root disk
    if instance.image_ref is not None:
        cache_name = imagecache.get_cache_fname(instance.image_ref)
        disk_info['disk'] = {
            'type': type,
            'path': instance_path('disk'),
            'virt_disk_size': instance.flavor.root_gb * units.Gi,
            'backing_file': cache_name,
            'disk_size': instance.flavor.root_gb * units.Gi,
            'over_committed_disk_size': 0}

    # swap disk, only when the flavor defines one
    swap_mb = instance.flavor.swap
    if swap_mb > 0:
        disk_info['disk.swap'] = {
            'type': type,
            'path': instance_path('disk.swap'),
            'virt_disk_size': swap_mb * units.Mi,
            'backing_file': 'swap_%s' % swap_mb,
            'disk_size': swap_mb * units.Mi,
            'over_committed_disk_size': 0}

    # ephemeral disk, only when the flavor defines one
    eph_gb = instance.flavor.ephemeral_gb
    if eph_gb > 0:
        disk_info['disk.local'] = {
            'type': type,
            'path': instance_path('disk.local'),
            'virt_disk_size': eph_gb * units.Gi,
            'backing_file': eph_name(eph_gb),
            'disk_size': eph_gb * units.Gi,
            'over_committed_disk_size': 0}

    # config drive is always raw and has no backing file
    if instance.config_drive:
        disk_info['disk.config'] = {
            'type': 'raw',
            'path': instance_path('disk.config'),
            'virt_disk_size': 1024,
            'backing_file': '',
            'disk_size': 1024,
            'over_committed_disk_size': 0}

    return disk_info


def fake_disk_info_json(instance, type='qcow2'):
    """Return fake instance_disk_info corresponding accurately to the
    properties of the given Instance object.

    :param instance: The instance we're generating fake disk_info for.
    :param type: libvirt's disk type.
    :return: JSON representation of instance_disk_info for all disks.
    :rtype: str
    """
    disk_info = fake_disk_info_byname(instance, type)
    return jsonutils.dumps(disk_info.values())


def _concurrency(signal, wait, done, target, is_block_dev=False):
    # Passed as the fetch callable to Backend.image(...).cache in the
    # concurrency tests below: signals that it has started, blocks until
    # the test allows it to proceed, then reports completion.
    signal.send()
    wait.wait()
    done.send()


class FakeVirtDomain(object):
    # Minimal stand-in for a libvirt virDomain: records identity, returns
    # canned info/XML, and implements the mutating calls as no-ops.

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
        if uuidstr is None:
            uuidstr = str(uuid.uuid4())
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        # [state, maxMem, memory, nrVirtCpu, cpuTime] as returned by info()
        self._info = [power_state.RUNNING, 2048 * units.Mi,
                      1234 * units.Mi, None, None]
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def name(self):
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, flags):
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass

    def isActive(self):
        return True


class CacheConcurrencyTestCase(test.NoDBTestCase):
    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests.
        # So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            # Only the image-cache base dir and the lock dir "exist".
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        def fake_execute(*args, **kwargs):
            pass

        def fake_extend(image, size, use_cow=False):
            pass

        self.stub_out('os.path.exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def _fake_instance(self, uuid):
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs sequentially.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 2 must still be blocked behind thread 1's lock.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        tries = 0
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            # Thread 2 finished while thread 1 still holds its own lock,
            # proving the two fnames do not serialize on each other.
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()


class FakeVolumeDriver(object):
    # Minimal volume driver used by create_fake_libvirt_mock below.

    def __init__(self, *args, **kwargs):
        pass

    def attach_volume(self, *args):
        pass

    def detach_volume(self, *args):
        pass

    def get_xml(self, *args):
        return ""

    def get_config(self, *args):
        """Connect the volume to a fake device."""
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        conf.target_dev = "fake"
        conf.target_bus = "fake"
        return conf

    def connect_volume(self, *args):
        """Connect the volume to a fake device."""
        pass


class FakeConfigGuestDisk(object):
    def __init__(self, *args, **kwargs):
        self.source_type = None
        self.driver_cache = None


class FakeConfigGuest(object):
    def __init__(self, *args, **kwargs):
        self.driver_cache = None


class FakeNodeDevice(object):
    # Stand-in for a libvirt nodedev object that just echoes back the
    # XML it was constructed with.

    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, flags):
        return self.xml


def _create_test_instance():
    flavor = objects.Flavor(memory_mb=2048,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=10,
                            id=2,
                            name=u'm1.small',
                            ephemeral_gb=20,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=2,
                            extra_specs={})
    # Plain dict of instance attributes; tests expand it with
    # objects.Instance(**self.test_instance).
    return {
        'id': 1,
        'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {
            'image_disk_format': 'raw',
        },
        'flavor': flavor,
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
        'task_state': None,
    }


class LibvirtConnTestCase(test.NoDBTestCase):

    REQUIRES_LOCKING = True

    _EPHEMERAL_20_DEFAULT = eph_name(20)

    def setUp(self):
        super(LibvirtConnTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))

        self.flags(sysinfo_serial="hardware", group="libvirt")

        # normally loaded during nova-compute startup
        os_vif.initialize()

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        def fake_extend(image, size, use_cow=False):
            pass

        self.stubs.Set(libvirt_driver.disk_api, 'extend', fake_extend)

        self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                       imagebackend.Image._get_driver_format)

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.test_image_meta = {
            "disk_format": "raw",
        }
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self)
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """

    def relpath(self, path):
        # Path relative to the configured instances directory.
        return os.path.relpath(path, CONF.instances_path)

    def tearDown(self):
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()

    def test_driver_capabilities(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['has_imagecache'],
                        'Driver capabilities for \'has_imagecache\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_recreate'],
                        'Driver capabilities for \'supports_recreate\' '
                        'is invalid')
        self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
                         'Driver capabilities for '
                         '\'supports_migrate_to_same_host\' is invalid')
        self.assertTrue(drvr.capabilities['supports_attach_interface'],
                        'Driver capabilities for '
                        '\'supports_attach_interface\' '
                        'is invalid')

    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtDriver(libvirt is not used)."""

        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Creating mocks
        volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver'
                         '.FakeVolumeDriver']
        fake = FakeLibvirtDriver()

        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers',
                       lambda x: volume_driver)
        self.stubs.Set(host.Host, 'get_connection', lambda x: fake)

    def fake_lookup(self, instance_name):
        return FakeVirtDomain()

    def fake_execute(self, *args, **kwargs):
        # Touch the file named by the last argument.
        open(args[-1], "a").close()

    def _create_service(self, **kwargs):
        service_ref = {'host': kwargs.get('host',
                                          'dummy'),
                       'disabled': kwargs.get('disabled', False),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0}

        return objects.Service(**service_ref)

    def _get_pause_flag(self, drvr, network_info, power_on=True,
                        vifs_already_plugged=False):
        # Mirrors the driver's decision: start the guest paused only when
        # we will wait for neutron VIF-plugged events.
        timeout = CONF.vif_plugging_timeout

        events = []
        if (drvr._conn_supports_start_paused and
                utils.is_neutron() and
                not vifs_already_plugged and
                power_on and timeout):
            events = drvr._get_neutron_events(network_info)

        return bool(events)

    def test_public_api_signatures(self):
        baseinst = driver.ComputeDriver(None)
        inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertPublicAPISignatures(baseinst, inst)

    def test_legacy_block_device_info(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr.need_legacy_block_device_info)

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_deprecation_warning(self, mock_warning,
                                                  mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION)):
            self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")

        # Test that a warning is logged if the libvirt version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_qemu_version_deprecation_warning(self, mock_warning,
                                                       mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_VERSION)):
            self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")

        # Test that a warning is logged if the libvirt version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION)):
            self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")

        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_LIBVIRT_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_QEMU_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_qemu_version_ok(self, mock_warning,
                                      mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_VERSION)):
            self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")

        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_QEMU_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_libvirt(self, mock_libv, mock_qemu,
                                         mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_qemu(self, mock_libv, mock_qemu,
                                      mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_ok(self, mock_libv, mock_qemu, mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_libvirt(self, mock_libv, mock_qemu,
                                          mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_qemu(self, mock_libv, mock_qemu,
                                       mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_ok(self, mock_libv, mock_qemu, mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    # Shared driver for the migration-flag tests: parses the flags and
    # compares against the expected live/block migration flag sets.
    def _do_test_parse_migration_flags(self, lm_expected=None,
                                       bm_expected=None):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._parse_migration_flags()

        if lm_expected is not None:
            self.assertEqual(lm_expected, drvr._live_migration_flags)
        if bm_expected is not None:
            self.assertEqual(bm_expected, drvr._block_migration_flags)

    def test_parse_live_migration_flags_default(self):
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))

    def test_parse_live_migration_flags(self):
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))

    def test_parse_block_migration_flags_default(self):
        self._do_test_parse_migration_flags(
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_parse_block_migration_flags(self):
        self._do_test_parse_migration_flags(
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_parse_migration_flags_p2p_xen(self):
        # Xen drops the PEER2PEER flag from both flag sets.
        self.flags(virt_type='xen', group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_live_migration_tunnelled_none(self):
        self.flags(live_migration_tunnelled=None, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))

    def test_live_migration_tunnelled_true(self):
        self.flags(live_migration_tunnelled=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_live_migration_permit_postcopy_true(self, host):
        self.flags(live_migration_permit_post_copy=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_live_migration_permit_auto_converge_true(self, host):
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_live_migration_permit_auto_converge_and_post_copy_true(self,
                                                                    host):
        # When both are permitted, post-copy wins over auto-converge.
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self.flags(live_migration_permit_post_copy=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))

    @mock.patch.object(host.Host, 'has_min_version')
    def test_live_migration_auto_converge_and_post_copy_true_old_libvirt(
            self, mock_host):
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self.flags(live_migration_permit_post_copy=True, group='libvirt')

        # Report the post-copy min versions as unsatisfied, everything
        # else as satisfied, so only auto-converge is enabled.
        def fake_has_min_version(lv_ver=None, hv_ver=None, hv_type=None):
            if (lv_ver == libvirt_driver.MIN_LIBVIRT_POSTCOPY_VERSION and
                    hv_ver == libvirt_driver.MIN_QEMU_POSTCOPY_VERSION):
                return False
            return True
        mock_host.side_effect = fake_has_min_version

        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))

    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_live_migration_permit_postcopy_true_old_libvirt(self, host):
        self.flags(live_migration_permit_post_copy=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_live_migration_permit_auto_converge_true_old_libvirt(self, host):
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_live_migration_permit_postcopy_false(self):
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_live_migration_permit_autoconverge_false(self):
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password(self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        # Linux guests default to the "root" account.
        mock_guest.set_user_password.assert_called_once_with("root", "123")

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_parallels(self, mock_get_guest, ver):
        self.flags(virt_type='parallels', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("root", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_windows(self, mock_get_guest, ver,
                                        mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.os_type = "windows"
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        # Windows guests get the "Administrator" account instead of root.
        mock_guest.set_user_password.assert_called_once_with(
            "Administrator", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        # The image property os_admin_user overrides the default account.
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes",
            "os_admin_user": "foo"
        }}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("foo", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_set_admin_password_bad_version(self, mock_svc, mock_image):
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        for hyp in ('kvm', 'parallels'):
            self.flags(virt_type=hyp, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertRaises(exception.SetAdminPasswdNotSupported,
                              drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_set_admin_password_guest_agent_not_running(self, mock_svc):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc, mock_save):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc, mock_save):
        # Tests enabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # since disabled_reason is not set and not prefixed with "AUTO:",
        # service should not be enabled.
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc,
                                                        mock_save):
        # Tests enabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)
        mock_save.assert_not_called()

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc,
                                                          mock_save):
        # Tests disabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)

    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
db_mock.side_effect = exception.NovaException drvr._set_host_enabled(False) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") def test_prepare_pci_device(self, mock_lookup): pci_devices = [dict(hypervisor_name='xxx')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) drvr._prepare_pci_devices_for_use(pci_devices) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") @mock.patch.object(fakelibvirt.virNodeDevice, "dettach") def test_prepare_pci_device_exception(self, mock_detach, mock_lookup): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) mock_detach.side_effect = fakelibvirt.libvirtError("xxxx") self.assertRaises(exception.PciDevicePrepareFailed, drvr._prepare_pci_devices_for_use, pci_devices) @mock.patch.object(host.Host, "has_min_version", return_value=False) def test_device_metadata(self, mock_version): xml = """ <domain> <name>dummy</name> <uuid>32dfcb37-5af1-552b-357c-be8c3aa38310</uuid> <memory>1048576</memory> <vcpu>1</vcpu> <os> <type arch='x86_64' machine='pc-i440fx-2.4'>hvm</type> </os> <devices> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/generic'/> <target dev='sda' bus='scsi'/> <address type='drive' controller='0' bus='0' target='0' unit='0'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/generic-1'/> <target dev='hda' bus='ide'/> <address type='drive' controller='0' bus='1' target='0' unit='0'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/generic-2'/> <target dev='hdb' 
bus='ide'/> <address type='drive' controller='0' bus='1' target='1' unit='1'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/aa1'/> <target dev='sdb' bus='usb'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/var/lib/libvirt/images/centos'/> <backingStore/> <target dev='vda' bus='virtio'/> <boot order='1'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/> </disk> <disk type='file' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source file='/var/lib/libvirt/images/generic.qcow2'/> <target dev='vdb' bus='virtio'/> <address type='virtio-mmio'/> </disk> <disk type='file' device='disk'> <driver name='qemu' type='qcow2'/> <source file='/var/lib/libvirt/images/test.qcow2'/> <backingStore/> <target dev='vdc' bus='virtio'/> <alias name='virtio-disk1'/> <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/> </disk> <interface type='network'> <mac address='52:54:00:f6:35:8f'/> <source network='default'/> <model type='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> </interface> <interface type='network'> <mac address='51:5a:2c:a4:5e:1b'/> <source network='default'/> <model type='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x1'/> </interface> <interface type='network'> <mac address='fa:16:3e:d1:28:e4'/> <source network='default'/> <model type='virtio'/> <address type='virtio-mmio'/> </interface> <interface type='network'> <mac address='52:54:00:14:6f:50'/> <source network='default' bridge='virbr0'/> <target dev='vnet0'/> <model type='virtio'/> <alias name='net0'/> <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0001'/> </interface> </devices> </domain>""" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) instance_ref = 
objects.Instance(**self.test_instance) bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sda', 'tag': "db"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/hda', 'tag': "nfvfunc1"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdb', 'tag': "nfvfunc2"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 4, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/hdb'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 5, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vda', 'tag': "nfvfunc3"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 6, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdb', 'tag': "nfvfunc4"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 7, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdc', 'tag': "nfvfunc5"}), ] ) vif = obj_vif.VirtualInterface(context=self.context) vif.address = '52:54:00:f6:35:8f' vif.network_id = 123 vif.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif.uuid = '12ec4b21-ef22-6c21-534b-ba3e3ab3a311' vif.tag = 'mytag1' vif1 = obj_vif.VirtualInterface(context=self.context) vif1.address = '51:5a:2c:a4:5e:1b' vif1.network_id = 123 vif1.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif1.uuid = 'abec4b21-ef22-6c21-534b-ba3e3ab3a312' vif1.tag = None vif2 = obj_vif.VirtualInterface(context=self.context) vif2.address = 'fa:16:3e:d1:28:e4' vif2.network_id = 123 vif2.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif2.uuid = '645686e4-7086-4eab-8c2f-c41f017a1b16' vif2.tag = 'mytag2' vif3 = obj_vif.VirtualInterface(context=self.context) vif3.address = '52:54:00:14:6f:50' vif3.network_id = 123 
vif3.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif3.uuid = '99cc3604-782d-4a32-a27c-bc33ac56ce86' vif3.tag = 'mytag3' vifs = [vif, vif1, vif2, vif3] with test.nested( mock.patch('nova.objects.VirtualInterfaceList' '.get_by_instance_uuid', return_value=vifs), mock.patch('nova.objects.BlockDeviceMappingList' '.get_by_instance_uuid', return_value=bdms), mock.patch('nova.virt.libvirt.host.Host.get_guest', return_value=guest), mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc', return_value=xml)): metadata_obj = drvr._build_device_metadata(self.context, instance_ref) metadata = metadata_obj.devices self.assertEqual(9, len(metadata)) self.assertIsInstance(metadata[0], objects.DiskMetadata) self.assertIsInstance(metadata[0].bus, objects.SCSIDeviceBus) self.assertEqual(['db'], metadata[0].tags) self.assertFalse(metadata[0].bus.obj_attr_is_set('address')) self.assertEqual(['nfvfunc1'], metadata[1].tags) self.assertIsInstance(metadata[1], objects.DiskMetadata) self.assertIsInstance(metadata[1].bus, objects.IDEDeviceBus) self.assertEqual(['nfvfunc1'], metadata[1].tags) self.assertFalse(metadata[1].bus.obj_attr_is_set('address')) self.assertIsInstance(metadata[2], objects.DiskMetadata) self.assertIsInstance(metadata[2].bus, objects.USBDeviceBus) self.assertEqual(['nfvfunc2'], metadata[2].tags) self.assertFalse(metadata[2].bus.obj_attr_is_set('address')) self.assertIsInstance(metadata[3], objects.DiskMetadata) self.assertIsInstance(metadata[3].bus, objects.PCIDeviceBus) self.assertEqual(['nfvfunc3'], metadata[3].tags) self.assertEqual('0000:00:09.0', metadata[3].bus.address) self.assertIsInstance(metadata[4], objects.DiskMetadata) self.assertEqual(['nfvfunc4'], metadata[4].tags) self.assertIsInstance(metadata[5], objects.DiskMetadata) self.assertEqual(['nfvfunc5'], metadata[5].tags) self.assertIsInstance(metadata[6], objects.NetworkInterfaceMetadata) self.assertIsInstance(metadata[6].bus, objects.PCIDeviceBus) self.assertEqual(['mytag1'], 
metadata[6].tags) self.assertEqual('0000:00:03.0', metadata[6].bus.address) self.assertIsInstance(metadata[7], objects.NetworkInterfaceMetadata) self.assertEqual(['mytag2'], metadata[7].tags) self.assertIsInstance(metadata[8], objects.NetworkInterfaceMetadata) self.assertEqual(['mytag3'], metadata[8].tags) @mock.patch.object(host.Host, 'get_connection') @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc') def test_detach_pci_devices(self, mocked_get_xml_desc, mock_conn): fake_domXML1_with_pci = ( """<domain> <devices> <disk type='file' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source file='xxx'/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> <hostdev mode="subsystem" type="pci" managed="yes"> <source> <address function="0x1" slot="0x10" domain="0x0001" bus="0x04"/> </source> </hostdev></devices></domain>""") fake_domXML1_without_pci = ( """<domain> <devices> <disk type='file' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source file='xxx'/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0001' bus='0x00' slot='0x04' function='0x0'/> </disk></devices></domain>""") pci_device_info = {'compute_node_id': 1, 'instance_uuid': 'uuid', 'address': '0001:04:10.1'} pci_device = objects.PciDevice(**pci_device_info) pci_devices = [pci_device] mocked_get_xml_desc.return_value = fake_domXML1_without_pci drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain( drvr._get_connection(), fake_domXML1_with_pci, False) guest = libvirt_guest.Guest(dom) drvr._detach_pci_devices(guest, pci_devices) @mock.patch.object(host.Host, 'get_connection') @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc') def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, mock_conn): fake_domXML1_with_pci = ( """<domain> <devices> <disk type='file' device='disk'> 
<driver name='qemu' type='qcow2' cache='none'/> <source file='xxx'/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> <hostdev mode="subsystem" type="pci" managed="yes"> <source> <address function="0x1" slot="0x10" domain="0x0001" bus="0x04"/> </source> </hostdev></devices></domain>""") pci_device_info = {'compute_node_id': 1, 'instance_uuid': 'uuid', 'address': '0001:04:10.1'} pci_device = objects.PciDevice(**pci_device_info) pci_devices = [pci_device] mocked_get_xml_desc.return_value = fake_domXML1_with_pci drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain( drvr._get_connection(), fake_domXML1_with_pci, False) guest = libvirt_guest.Guest(dom) self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, guest, pci_devices) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector(self, fake_get_connector): initiator = 'fake.initiator.iqn' ip = 'fakeip' host = 'fakehost' wwpns = ['100010604b019419'] wwnns = ['200010604b019419'] self.flags(my_ip=ip) self.flags(host=host) expected = { 'ip': ip, 'initiator': initiator, 'host': host, 'wwpns': wwpns, 'wwnns': wwnns } volume = { 'id': 'fake' } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertThat(expected, matchers.DictMatches(result)) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector_storage_ip(self, fake_get_connector): ip = '100.100.100.100' storage_ip = '101.101.101.101' self.flags(my_block_storage_ip=storage_ip, my_ip=ip) volume = { 'id': 'fake' } expected = { 'ip': storage_ip } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = 
drvr.get_volume_connector(volume) self.assertEqual(storage_ip, result['ip']) def test_lifecycle_event_registration(self): calls = [] def fake_registerErrorHandler(*args, **kwargs): calls.append('fake_registerErrorHandler') def fake_get_host_capabilities(**args): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu calls.append('fake_get_host_capabilities') return caps @mock.patch.object(fakelibvirt, 'registerErrorHandler', side_effect=fake_registerErrorHandler) @mock.patch.object(host.Host, "get_capabilities", side_effect=fake_get_host_capabilities) def test_init_host(get_host_capabilities, register_error_handler): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("test_host") test_init_host() # NOTE(dkliban): Will fail if get_host_capabilities is called before # registerErrorHandler self.assertEqual(['fake_registerErrorHandler', 'fake_get_host_capabilities'], calls) def test_sanitize_log_to_xml(self): # setup fake data data = {'auth_password': 'scrubme'} bdm = [{'connection_info': {'data': data}}] bdi = {'block_device_mapping': bdm} # Tests that the parameters to the _get_guest_xml method # are sanitized for passwords when logged. 
def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: self.assertNotIn('scrubme', args[0]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = mock.Mock() with test.nested( mock.patch.object(libvirt_driver.LOG, 'debug', side_effect=fake_debug), mock.patch.object(drvr, '_get_guest_config', return_value=conf) ) as ( debug_mock, conf_mock ): drvr._get_guest_xml(self.context, self.test_instance, network_info={}, disk_info={}, image_meta={}, block_device_info=bdi) # we don't care what the log message is, we just want to make sure # our stub method is called which asserts the password is scrubbed self.assertTrue(debug_mock.called) @mock.patch.object(time, "time") def test_get_guest_config(self, time_mock): time_mock.return_value = 1234567.89 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) test_instance = copy.deepcopy(self.test_instance) test_instance["display_name"] = "purple tomatoes" ctxt = context.RequestContext(project_id=123, project_name="aubergine", user_id=456, user_name="pie") flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs={}) instance_ref = objects.Instance(**test_instance) instance_ref.flavor = flavor image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info, context=ctxt) self.assertEqual(cfg.uuid, instance_ref["uuid"]) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, 6 * units.Ki) self.assertEqual(cfg.vcpus, 28) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) 
self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(len(cfg.metadata), 1) self.assertIsInstance(cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance) self.assertEqual(version.version_string_with_package(), cfg.metadata[0].package) self.assertEqual("purple tomatoes", cfg.metadata[0].name) self.assertEqual(1234567.89, cfg.metadata[0].creationTime) self.assertEqual("image", cfg.metadata[0].roottype) self.assertEqual(str(instance_ref["image_ref"]), cfg.metadata[0].rootid) self.assertIsInstance(cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner) self.assertEqual(456, cfg.metadata[0].owner.userid) self.assertEqual("pie", cfg.metadata[0].owner.username) self.assertEqual(123, cfg.metadata[0].owner.projectid) self.assertEqual("aubergine", cfg.metadata[0].owner.projectname) self.assertIsInstance(cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor) self.assertEqual("m1.small", cfg.metadata[0].flavor.name) self.assertEqual(6, cfg.metadata[0].flavor.memory) self.assertEqual(28, cfg.metadata[0].flavor.vcpus) self.assertEqual(496, cfg.metadata[0].flavor.disk) self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral) self.assertEqual(33550336, cfg.metadata[0].flavor.swap) def test_get_guest_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_get_guest_config_lxc_with_id_maps(self): self.flags(virt_type='lxc', group='libvirt') self.flags(uid_maps=['0:1000:100'], group='libvirt') self.flags(gid_maps=['0:1000:100'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) self.assertEqual(instance_ref.vcpus, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) 
self.assertEqual(len(cfg.idmaps), 2) self.assertIsInstance(cfg.idmaps[0], vconfig.LibvirtConfigGuestUIDMap) self.assertIsInstance(cfg.idmaps[1], vconfig.LibvirtConfigGuestGIDMap) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_fits(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = 
blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice') ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def _test_get_guest_memory_backing_config( self, host_topology, inst_topology, numatune): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object( drvr, "_get_host_numa_topology", return_value=host_topology): return drvr._get_guest_memory_backing_config( inst_topology, numatune, {}) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_large_success(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertEqual(1, len(result.hugepages)) self.assertEqual(2048, result.hugepages[0].size_kb) self.assertEqual([0], result.hugepages[0].nodeset) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_smallest(self, 
mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertIsNone(result) def test_get_guest_memory_backing_config_realtime(self): flavor = {"extra_specs": { "hw:cpu_realtime": "yes", "hw:cpu_policy": "dedicated" }} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) membacking = drvr._get_guest_memory_backing_config( None, None, flavor) self.assertTrue(membacking.locked) self.assertFalse(membacking.sharedpages) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_pci_no_numa_info( self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', 
status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=None) pci_device = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device])): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=1) pci_device = objects.PciDevice(**pci_device_info) pci_device_info.update(numa_node=0, address='0000:00:00.2') pci_device2 = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object( host.Host, 
"get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice'), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device, pci_device2]) ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock, pci_mock): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') @mock.patch.object(host.Host, 'get_capabilities') @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled') def _test_get_guest_config_numa_unsupported(self, fake_lib_version, fake_version, fake_type, fake_arch, exception_class, pagesize, mock_host, mock_caps, mock_lib_version, mock_version, mock_type): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=pagesize)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = fake_arch caps.host.topology = self._fake_caps_numa_topology() mock_type.return_value = fake_type mock_version.return_value = fake_version mock_lib_version.return_value = fake_lib_version mock_caps.return_value = caps drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) 
        # The whole point of the parameterized helper: guest config build
        # must raise the given exception class for unsupported combos.
        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [], image_meta, disk_info)

    # libvirt one version below MIN_LIBVIRT_NUMA_VERSION -> NUMA unsupported.
    def test_get_guest_config_numa_old_version_libvirt(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1,
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    # PPC64LE has its own (higher) libvirt NUMA minimum version.
    def test_get_guest_config_numa_old_version_libvirt_ppc(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION_PPC) - 1,
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.PPC64LE,
            exception.NUMATopologyUnsupported,
            None)

    # A version on the BAD_LIBVIRT_NUMA_VERSIONS blacklist is also rejected,
    # even though it is above the minimum.
    def test_get_guest_config_numa_bad_version_libvirt(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]),
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_has_numa_support_bad_version_libvirt_log(self, mock_warn):
        # Tests that a warning is logged once and only once when there is a
        # bad BAD_LIBVIRT_NUMA_VERSIONS detected.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn'))
        with mock.patch.object(drvr._host, 'has_version', return_value=True):
            # Call twice: the warning must only be emitted on the first call,
            # guarded by the _bad_libvirt_numa_version_warn flag.
            for i in range(2):
                self.assertFalse(drvr._has_numa_support())
        self.assertTrue(drvr._bad_libvirt_numa_version_warn)
        self.assertEqual(1, mock_warn.call_count)
        # assert the version is logged properly
        self.assertEqual('1.2.9.2', mock_warn.call_args[0][1])

    # QEMU one version below the NUMA/hugepage minimum -> NUMA unsupported.
    def test_get_guest_config_numa_old_version_qemu(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    # An architecture outside the supported set (here s390) -> unsupported.
    def test_get_guest_config_numa_other_arch_qemu(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.S390,
            exception.NUMATopologyUnsupported,
            None)

    # The XEN hypervisor driver does not support guest NUMA topologies.
    def test_get_guest_config_numa_xen(self):
        self.flags(virt_type='xen', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
            versionutils.convert_version_to_int((4, 5, 0)),
            'XEN',
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    # pagesize=2048 requested but libvirt below the hugepage minimum
    # -> MemoryPagesUnsupported.
    def test_get_guest_config_numa_old_pages_libvirt(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1,
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.MemoryPagesUnsupported,
            2048)

    # pagesize=2048 requested but QEMU below the hugepage minimum.
    def test_get_guest_config_numa_old_pages_qemu(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION),
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            2048)

    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
            self, is_able):
        # Instance without a NUMA topology on a NUMA host with a vcpu_pin_set:
        # the guest gets a plain cpuset (no vcpupin, no guest NUMA config).
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        # kb_mem large enough for the instance to fit the host topology.
        caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
                ) as (has_min_version_mock, get_host_cap_mock,
                      get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into
            # account when choosing viable cells
            self.assertEqual(set([2, 3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)

    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
        # Instance WITH a NUMA topology on a host WITHOUT one
        # (caps.host.topology is None): guest NUMA cells are emitted but no
        # numatune or pinning is generated.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), memory=1024),
                   objects.InstanceNUMACell(
                       id=1, cpuset=set([2]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.numatune)
            self.assertIsNotNone(cfg.cpu.numa)
            # Guest cell ids/cpus/memory mirror the instance topology
            # (memory converted from MiB to KiB).
            for instance_cell, numa_cfg_cell in zip(
                    instance_topology.cells, cfg.cpu.numa.cells):
                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)

    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_topo(self, is_able):
        # Instance and host both have a NUMA topology; the guest should get
        # per-vCPU pinning restricted by the vcpu_pin_set (see asserts below).
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
                   objects.InstanceNUMACell(
                       id=2, cpuset=set([2, 3]), memory=1024,
                       pagesize=None)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            # Emulator pin covers the union of all allowed pCPUs.
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([2, 3, 4, 5]),
                             cfg.cputune.emulatorpin.cpuset)

            # Guest cell ids are renumbered 0..n-1 regardless of the
            # instance cell ids.
            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)

            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)

            # memnodes map the renumbered guest cells back to host node ids.
            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

    def test_get_guest_config_numa_host_instance_topo_reordered(self):
        # Instance cells are given out of host-node order (ids 3 then 0);
        # pinning must still follow the instance cell order.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024),
                   objects.InstanceNUMACell(
                       id=0, cpuset=set([2, 3]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 6, 7]),
                             cfg.cputune.emulatorpin.cpuset)

            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                # No pagesize requested, so no memAccess mode either.
                self.assertIsNone(numa_cfg_cell.memAccess)

            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

    def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
        # Explicit per-vCPU pinning in the instance cells (cpu_pinning maps
        # guest vCPU -> host pCPU); each vcpupin entry must be a single pCPU.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]), memory=1024,
                cpu_pinning={0: 24, 1: 25}),
                   objects.InstanceNUMACell(
                       id=0, cpuset=set([2, 3]), memory=1024,
                       cpu_pinning={2: 0, 3: 1})])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        # Host topology large enough (4 sockets x 3 cores x 2 threads per
        # cell) for the explicit pinning targets (24, 25) to exist.
        caps.host.topology = self._fake_caps_numa_topology(
            sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 24, 25]),
                             cfg.cputune.emulatorpin.cpuset)

            for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells, cfg.cpu.numa.cells)):
                self.assertEqual(i, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertIsNone(numa_cfg_cell.memAccess)

            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for i, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells, cfg.numatune.memnodes)):
                self.assertEqual(i, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

    def test_get_guest_config_numa_host_mempages_shared(self):
        # Hugepages (pagesize=2048) requested: guest cells must get
        # memAccess="shared" (asserted in the continuation of this test).
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                # pagesize was requested, so memory access must be shared.
                self.assertEqual("shared", numa_cfg_cell.memAccess)

            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

            # No realtime policy requested -> no vcpusched entries.
            self.assertEqual(0, len(cfg.cputune.vcpusched))
            self.assertEqual(set([2, 3, 4, 5]),
                             cfg.cputune.emulatorpin.cpuset)

    def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
        # Realtime extra specs: vCPUs outside the realtime mask (^0-1) get a
        # fifo scheduler, and the emulator is pinned to the masked-out vCPUs.
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={
                                    "hw:cpu_realtime": "yes",
                                    "hw:cpu_policy": "dedicated",
                                    "hw:cpu_realtime_mask": "^0-1"
                                })
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertEqual("shared", numa_cfg_cell.memAccess)

            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

            self.assertEqual(1, len(cfg.cputune.vcpusched))
            self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
            # vCPUs 2, 3 are realtime (mask excludes 0-1).
            self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
            # Emulator runs on the non-realtime vCPUs.
            self.assertEqual(set([0, 1]), cfg.cputune.emulatorpin.cpuset)

    def test_get_cpu_numa_config_from_instance(self):
        # Direct unit test of the topology -> LibvirtConfigGuestCPUNUMA
        # conversion, with wants_hugepages=True forcing memAccess="shared".
        topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
            objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
        ])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(topology, True)

        self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
        self.assertEqual(0, conf.cells[0].id)
        self.assertEqual(set([1, 2]), conf.cells[0].cpus)
        self.assertEqual(131072, conf.cells[0].memory)
        self.assertEqual("shared", conf.cells[0].memAccess)
        self.assertEqual(1, conf.cells[1].id)
        self.assertEqual(set([3, 4]), conf.cells[1].cpus)
        self.assertEqual(131072, conf.cells[1].memory)
        self.assertEqual("shared", conf.cells[1].memAccess)

    def test_get_cpu_numa_config_from_instance_none(self):
        # No instance topology -> no guest CPU NUMA config.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(None, False)
        self.assertIsNone(conf)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    def test_get_memnode_numa_config_from_instance(self, mock_numa):
        # Non-contiguous host node ids (0, 1, 16): memnode cellid is the
        # positional index while nodeset keeps the real host node id.
        instance_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
            objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
            objects.InstanceNUMACell(id=16, cpuset=set([5, 6]), memory=128)
        ])

        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=0, cpuset=set([1, 2]), memory=1024, mempages=[]),
                objects.NUMACell(
                    id=1, cpuset=set([3, 4]), memory=1024, mempages=[]),
                objects.NUMACell(
                    id=16, cpuset=set([5, 6]), memory=1024, mempages=[])])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with test.nested(
                mock.patch.object(drvr, "_get_host_numa_topology",
                                  return_value=host_topology)):
            guest_numa_config = drvr._get_guest_numa_config(instance_topology,
                flavor={}, allowed_cpus=[1, 2, 3, 4, 5, 6], image_meta={})
            self.assertEqual(2,
                             guest_numa_config.numatune.memnodes[2].cellid)
            self.assertEqual([16],
                guest_numa_config.numatune.memnodes[2].nodeset)
            self.assertEqual(set([5, 6]),
                guest_numa_config.numaconfig.cells[2].cpus)

    @mock.patch.object(host.Host, 'has_version', return_value=True)
    def test_has_cpu_policy_support(self, mock_has_version):
        # has_version=True here means a known-broken libvirt for CPU pinning,
        # so the capability check must raise.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.CPUPinningNotSupported,
                          drvr._has_cpu_policy_support)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_not_want_hugepages(self, mock_caps, mock_hp, mock_numa):
        # pagesize=4 (small pages) must never count as wanting hugepages,
        # whatever combination of host/instance topologies is passed.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=4),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=4)])

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        mock_caps.return_value = caps

        host_topology = drvr._get_host_numa_topology()

        self.assertFalse(drvr._wants_hugepages(None, None))
        self.assertFalse(drvr._wants_hugepages(host_topology, None))
        self.assertFalse(drvr._wants_hugepages(None, instance_topology))
        self.assertFalse(drvr._wants_hugepages(host_topology,
                                               instance_topology))

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_want_hugepages(self, mock_caps, mock_hp, mock_numa):
        # Run the hugepage-wanting check for each supported architecture.
        for each_arch in [arch.I686, arch.X86_64, arch.PPC64LE, arch.PPC64]:
            self._test_does_want_hugepages(
                mock_caps, mock_hp, mock_numa, each_arch)

    def _test_does_want_hugepages(self, mock_caps, mock_hp, mock_numa,
                                  architecture):
        # Reserved hugepages per host node feed the per-cell mempages
        # 'reserved' counters checked in the continuation of this helper.
        self.flags(reserved_huge_pages=[
            {'node': 0, 'size': 2048, 'count': 128},
            {'node': 1, 'size': 2048, 'count': 1},
            {'node': 3, 'size': 2048, 'count': 64}])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = architecture
        caps.host.topology = self._fake_caps_numa_topology()

        mock_caps.return_value = caps

        host_topology = drvr._get_host_numa_topology()
        # Reserved counts from the reserved_huge_pages flag, per host node;
        # node 2 had no reservation configured, hence 0.
        self.assertEqual(128, host_topology.cells[0].mempages[1].reserved)
        self.assertEqual(1, host_topology.cells[1].mempages[1].reserved)
        self.assertEqual(0, host_topology.cells[2].mempages[1].reserved)
        self.assertEqual(64, host_topology.cells[3].mempages[1].reserved)

        self.assertTrue(drvr._wants_hugepages(host_topology,
                                              instance_topology))

    def test_get_guest_config_clock(self):
        # The hpet timer is only disabled explicitly on x86 guests; other
        # architectures get just the pit and rtc timers.
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        hpet_map = {
            arch.X86_64: True,
            arch.I686: True,
            arch.PPC: False,
            arch.PPC64: False,
            arch.ARMV7: False,
            arch.AARCH64: False,
            }

        for guestarch, expect_hpet in hpet_map.items():
            with mock.patch.object(libvirt_driver.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                cfg = drvr._get_guest_config(instance_ref, [],
                                             image_meta,
                                             disk_info)
                self.assertIsInstance(cfg.clock,
                                      vconfig.LibvirtConfigGuestClock)
                self.assertEqual(cfg.clock.offset, "utc")
                self.assertIsInstance(cfg.clock.timers[0],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertIsInstance(cfg.clock.timers[1],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertEqual(cfg.clock.timers[0].name, "pit")
                self.assertEqual(cfg.clock.timers[0].tickpolicy,
                                 "delay")
                self.assertEqual(cfg.clock.timers[1].name, "rtc")
                self.assertEqual(cfg.clock.timers[1].tickpolicy,
                                 "catchup")
                if expect_hpet:
                    self.assertEqual(3, len(cfg.clock.timers))
                    self.assertIsInstance(cfg.clock.timers[2],
                                          vconfig.LibvirtConfigGuestTimer)
                    self.assertEqual('hpet', cfg.clock.timers[2].name)
                    self.assertFalse(cfg.clock.timers[2].present)
                else:
                    self.assertEqual(2, len(cfg.clock.timers))

    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows(self, mock_version, mock_get_arch):
        # Windows guest on old libvirt (has_min_version False): localtime
        # clock with pit/rtc/hpet only (no hypervclock timer).
        mock_version.return_value = False
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)

    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_timer(self, mock_version,
                                            mock_get_arch):
        # Windows guest on new-enough libvirt: an extra hypervclock timer
        # is added (fourth timer, asserted present in the continuation).
        mock_version.return_value = True
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)
        self.assertEqual("hypervclock", cfg.clock.timers[3].name)
self.assertTrue(cfg.clock.timers[3].present) self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_config_windows_hyperv_feature2(self, mock_version): mock_version.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['os_type'] = 'windows' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "localtime") self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureHyperV) self.assertTrue(cfg.features[2].relaxed) self.assertTrue(cfg.features[2].spinlocks) self.assertEqual(8191, cfg.features[2].spinlock_retries) self.assertTrue(cfg.features[2].vapic) def test_get_guest_config_with_two_nics(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 2), image_meta, disk_info) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) 
self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki) self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) def test_get_guest_config_bug_1118829(self): self.flags(virt_type='uml', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) disk_info = {'disk_bus': 'virtio', 'cdrom_bus': 'ide', 'mapping': {u'vda': {'bus': 'virtio', 'type': 'disk', 'dev': u'vda'}, 'root': {'bus': 'virtio', 'type': 'disk', 'dev': 'vda'}}} # NOTE(jdg): For this specific test leave this blank # This will exercise the failed code path still, # and won't require fakes and stubs of the iscsi discovery block_device_info = {} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, block_device_info) self.assertEqual(instance_ref['root_device_name'], '/dev/vda') def test_get_guest_config_with_root_device_name(self): self.flags(virt_type='uml', group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = {'root_device_name': '/dev/vdb'} disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, block_device_info) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, block_device_info) self.assertEqual(0, len(cfg.features)) self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki) self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus) self.assertEqual(cfg.os_type, "uml") self.assertEqual(cfg.os_boot_dev, []) self.assertEqual(cfg.os_root, '/dev/vdb') self.assertEqual(len(cfg.devices), 3) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_has_uefi_support_with_invalid_version(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object(drvr._host, 'has_min_version', return_value=False): self.assertFalse(drvr._has_uefi_support()) def test_has_uefi_support_not_supported_arch(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "alpha" self.assertFalse(drvr._has_uefi_support()) @mock.patch('os.path.exists', return_value=False) def test_has_uefi_support_with_no_loader_existed(self, mock_exist): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(drvr._has_uefi_support()) @mock.patch('os.path.exists', return_value=True) def test_has_uefi_support(self, mock_has_version): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = 
vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        with mock.patch.object(drvr._host,
                               'has_min_version', return_value=True):
            self.assertTrue(drvr._has_uefi_support())

    def test_get_guest_config_with_uefi(self):
        """An image flagged hw_firmware_type=uefi must produce a guest
        config whose OS loader type is "pflash".
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_firmware_type": "uefi"}})
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # Pretend the host supports UEFI so config generation proceeds.
        with mock.patch.object(drvr, "_has_uefi_support",
                               return_value=True) as mock_support:
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            mock_support.assert_called_once_with()
            self.assertEqual(cfg.os_loader_type, "pflash")

    def test_get_guest_config_with_block_device(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        # Two fake volumes attached at explicit device names.
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/vdc'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/vdd'}),
            ]
        )
        info = {'block_device_mapping': driver_block_device.convert_volumes(
            bdms
        )}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        # Patch out BDM persistence so no DB access happens.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info,
                                         None, info)
            # Volumes show up as guest disks at the requested targets.
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdc')
self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[3].target_dev, 'vdd') mock_save.assert_called_with() def test_get_guest_config_lxc_with_attached_volume(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'boot_index': 0}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', }), fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'source_type': 'volume', 'destination_type': 'volume', }), ] ) info = {'block_device_mapping': driver_block_device.convert_volumes( bdms )} info['block_device_mapping'][0]['connection_info'] = conn_info info['block_device_mapping'][1]['connection_info'] = conn_info info['block_device_mapping'][2]['connection_info'] = conn_info info['block_device_mapping'][0]['mount_device'] = '/dev/vda' info['block_device_mapping'][1]['mount_device'] = '/dev/vdc' info['block_device_mapping'][2]['mount_device'] = '/dev/vdd' with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, info) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[1].target_dev, 'vdc') self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'vdd') mock_save.assert_called_with() def test_get_guest_config_with_configdrive(self): # It's necessary to check if the architecture is power, 
because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # make configdrive.required_by() return True instance_ref['config_drive'] = True disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) # The last device is selected for this. on x86 is the last ide # device (hdd). Since power only support scsi, the last device # is sdz expect = {"ppc": "sdz", "ppc64": "sdz", "ppc64le": "sdz", "aarch64": "sdz"} disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd") self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, disk) def test_get_guest_config_with_virtio_scsi_bus(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_scsi_model": "virtio-scsi"}}) instance_ref = objects.Instance(**self.test_instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, []) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestController) self.assertEqual(cfg.devices[2].model, 'virtio-scsi') def test_get_guest_config_with_virtio_scsi_bus_bdm(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_scsi_model": "virtio-scsi"}}) instance_ref = objects.Instance(**self.test_instance) conn_info = {'driver_volume_type': 'fake'} bdms = 
block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}), ] ) bd_info = { 'block_device_mapping': driver_block_device.convert_volumes(bdms)} bd_info['block_device_mapping'][0]['connection_info'] = conn_info bd_info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, bd_info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, [], bd_info) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'sdc') self.assertEqual(cfg.devices[2].target_bus, 'scsi') self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[3].target_dev, 'sdd') self.assertEqual(cfg.devices[3].target_bus, 'scsi') self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestController) self.assertEqual(cfg.devices[4].model, 'virtio-scsi') mock_save.assert_called_with() def test_get_guest_config_with_vnc(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', group='libvirt') self.flags(pointer_model='ps2mouse') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 7) self.assertIsInstance(cfg.devices[0], 
vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "vnc") def test_get_guest_config_with_vnc_and_tablet(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") def test_get_guest_config_with_spice_and_tablet(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=False, group='spice') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "spice") def test_get_guest_config_with_spice_and_agent(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestChannel) 
self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[5].type, "spice") self.assertEqual(cfg.devices[6].type, "qxl") @mock.patch('nova.console.serial.acquire_port') @mock.patch('nova.virt.hardware.get_number_of_serial_ports', return_value=1) @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',) def test_create_serial_console_devices_based_on_arch(self, mock_get_arch, mock_get_port_number, mock_acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial, arch.S390: vconfig.LibvirtConfigGuestConsole, arch.S390X: vconfig.LibvirtConfigGuestConsole} for guest_arch, device_type in expected.items(): mock_get_arch.return_value = guest_arch guest = vconfig.LibvirtConfigGuest() drvr._create_serial_console_devices(guest, instance=None, flavor={}, image_meta={}) self.assertEqual(1, len(guest.devices)) console_device = guest.devices[0] self.assertIsInstance(console_device, device_type) self.assertEqual("tcp", console_device.type) @mock.patch('nova.virt.hardware.get_number_of_serial_ports', return_value=4) @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', side_effect=[arch.X86_64, arch.S390, arch.S390X]) def test_create_serial_console_devices_with_limit_exceeded_based_on_arch( self, mock_get_arch, mock_get_port_number): self.flags(enabled=True, group='serial_console') self.flags(virt_type="qemu", group='libvirt') flavor = 'fake_flavor' image_meta = objects.ImageMeta() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) guest = vconfig.LibvirtConfigGuest() self.assertRaises(exception.SerialPortNumberLimitExceeded, drvr._create_serial_console_devices, guest, None, flavor, 
image_meta) mock_get_arch.assert_called_with(image_meta) mock_get_port_number.assert_called_with(flavor, image_meta) drvr._create_serial_console_devices(guest, None, flavor, image_meta) mock_get_arch.assert_called_with(image_meta) mock_get_port_number.assert_called_with(flavor, image_meta) drvr._create_serial_console_devices(guest, None, flavor, image_meta) mock_get_arch.assert_called_with(image_meta) mock_get_port_number.assert_called_with(flavor, image_meta) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console(self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.return_value = 11111 cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(8, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual(11111, cfg.devices[2].listen_port) def test_get_guest_config_serial_console_through_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3} 
image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual("tcp", cfg.devices[3].type) self.assertEqual("tcp", cfg.devices[4].type) def test_get_guest_config_serial_console_invalid_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises( exception.ImageSerialPortNumberInvalid, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_serial_console_image_and_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_serial_port_count": "3"}}) instance_ref = 
objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4} disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10, len(cfg.devices), cfg.devices) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual("tcp", cfg.devices[3].type) self.assertEqual("tcp", cfg.devices[4].type) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console_through_port_rng_exhausted( self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.side_effect = exception.SocketPortRangeExhaustedException( '127.0.0.1') self.assertRaises( exception.SocketPortRangeExhaustedException, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch('nova.console.serial.release_port') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') @mock.patch.object(host.Host, 'get_guest') 
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_serial_ports_from_guest') def test_serial_console_release_port( self, mock_get_serial_ports_from_guest, mock_get_guest, mock_get_info, mock_release_port): self.flags(enabled="True", group='serial_console') guest = libvirt_guest.Guest(FakeVirtDomain()) guest.power_off = mock.Mock() mock_get_info.return_value = hardware.InstanceInfo( state=power_state.SHUTDOWN) mock_get_guest.return_value = guest mock_get_serial_ports_from_guest.return_value = iter([ ('127.0.0.1', 10000), ('127.0.0.1', 10001)]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._destroy(objects.Instance(**self.test_instance)) mock_release_port.assert_has_calls( [mock.call(host='127.0.0.1', port=10000), mock.call(host='127.0.0.1', port=10001)]) @mock.patch('os.path.getsize', return_value=0) # size doesn't matter @mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size', return_value='fake-size') def test_detach_encrypted_volumes(self, mock_getsize, mock_get_volume_size): """Test that unencrypted volumes are not disconnected with dmcrypt.""" instance = objects.Instance(**self.test_instance) xml = """ <domain type='kvm'> <devices> <disk type='file'> <driver name='fake-driver' type='fake-type' /> <source file='filename'/> <target dev='vdc' bus='virtio'/> </disk> <disk type='block' device='disk'> <driver name='fake-driver' type='fake-type' /> <source dev='/dev/mapper/disk'/> <target dev='vda'/> </disk> <disk type='block' device='disk'> <driver name='fake-driver' type='fake-type' /> <source dev='/dev/mapper/swap'/> <target dev='vdb'/> </disk> </devices> </domain> """ dom = FakeVirtDomain(fake_xml=xml) instance.ephemeral_key_uuid = uuids.ephemeral_key_uuid # encrypted conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @mock.patch.object(dmcrypt, 'delete_volume') @mock.patch.object(conn._host, 'get_domain', return_value=dom) def detach_encrypted_volumes(block_device_info, mock_get_domain, mock_delete_volume): 
conn._detach_encrypted_volumes(instance, block_device_info)
            mock_get_domain.assert_called_once_with(instance)
            # No volume in the mapping is encrypted, so dmcrypt teardown
            # must never be invoked.
            self.assertFalse(mock_delete_volume.called)

        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}

        detach_encrypted_volumes(block_device_info)

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
        # No mode filter: every TCP source in the XML is reported.
        i = self._test_get_serial_ports_from_guest(None,
                                                   mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.1', 101),
            ('127.0.0.2', 100),
            ('127.0.0.2', 101)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_bind_only(self,
                                                   mock_get_xml_desc):
        # Only sources with mode="bind" are reported.
        i = self._test_get_serial_ports_from_guest('bind',
                                                   mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 101),
            ('127.0.0.2', 100)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_connect_only(self,
                                                      mock_get_xml_desc):
        # Only sources with mode="connect" are reported.
        i = self._test_get_serial_ports_from_guest('connect',
                                                   mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.2', 101)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
        # s390 guests expose <console> elements rather than <serial>;
        # the extraction must handle both device names identically.
        i = self._test_get_serial_ports_from_guest(None,
                                                   mock_get_xml_desc,
                                                   'console')
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.1', 101),
            ('127.0.0.2', 100),
            ('127.0.0.2', 101)], list(i))

    def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
                                          dev_name='serial'):
        """Common helper: feed a canned domain XML containing four TCP
        serial/console sources and return whatever generator
        _get_serial_ports_from_guest yields for the given mode filter.
        """
        xml = """
        <domain type='kvm'>
          <devices>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </%(dev_name)s>
          </devices>
        </domain>""" %
{'dev_name': dev_name} mock_get_xml_desc.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) guest = libvirt_guest.Guest(FakeVirtDomain()) return drvr._get_serial_ports_from_guest(guest, mode=mode) def test_get_guest_config_with_type_xen(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 6) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[3].type, "vnc") self.assertEqual(cfg.devices[4].type, "xen") @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=arch.S390X) def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') self._stub_host_capabilities_cpu_arch(arch.S390X) instance_ref = objects.Instance(**self.test_instance) cfg = self._get_guest_config_via_fake_api(instance_ref) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) log_file_device = cfg.devices[2] self.assertIsInstance(log_file_device, vconfig.LibvirtConfigGuestConsole) 
self.assertEqual("sclplm", log_file_device.target_type) self.assertEqual("file", log_file_device.type) terminal_device = cfg.devices[3] self.assertIsInstance(terminal_device, vconfig.LibvirtConfigGuestConsole) self.assertEqual("sclp", terminal_device.target_type) self.assertEqual("pty", terminal_device.type) self.assertEqual("s390-ccw-virtio", cfg.os_mach_type) def _stub_host_capabilities_cpu_arch(self, cpu_arch): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = cpu_arch caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) def _get_guest_config_via_fake_api(self, instance): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) return drvr._get_guest_config(instance, [], image_meta, disk_info) def test_get_guest_config_with_type_xen_pae_hvm(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['vm_mode'] = vm_mode.HVM image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path) self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureAPIC) def 
test_get_guest_config_with_type_xen_pae_pvm(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_type, vm_mode.XEN) self.assertEqual(1, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) def test_get_guest_config_with_vnc_and_spice(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], 
vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[6].type, "vnc") self.assertEqual(cfg.devices[7].type, "spice") def test_get_guest_config_with_watchdog_action_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "none"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type, agent_enabled=False, image_meta=None): self.flags(enabled=vnc_enabled, group='vnc') self.flags(enabled=spice_enabled, agent_enabled=agent_enabled, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(image_meta) return drvr._get_guest_pointer_model(os_type, image_meta) def test_use_ps2_mouse(self): self.flags(pointer_model='ps2mouse') tablet = self._test_get_guest_usb_tablet(True, True, 
vm_mode.HVM)
        self.assertIsNone(tablet)

    def test_get_guest_usb_tablet_wipe(self):
        self.flags(use_usb_tablet=True, group='libvirt')

        # HVM guest with at least one graphics console enabled gets a
        # tablet device...
        tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM)
        self.assertIsNotNone(tablet)

        # ...but no console at all, a non-HVM os_type, or an enabled
        # spice agent suppresses it.
        tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM)
        self.assertIsNone(tablet)

        tablet = self._test_get_guest_usb_tablet(True, True, "foo")
        self.assertIsNone(tablet)

        tablet = self._test_get_guest_usb_tablet(
            False, True, vm_mode.HVM, True)
        self.assertIsNone(tablet)

    def test_get_guest_usb_tablet_image_meta(self):
        # Same matrix as test_get_guest_usb_tablet_wipe, but the tablet
        # is requested through the image's hw_pointer_model property.
        self.flags(use_usb_tablet=True, group='libvirt')
        image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}

        tablet = self._test_get_guest_usb_tablet(
            True, True, vm_mode.HVM, image_meta=image_meta)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(
            True, False, vm_mode.HVM, image_meta=image_meta)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(
            False, True, vm_mode.HVM, image_meta=image_meta)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(
            False, False, vm_mode.HVM, image_meta=image_meta)
        self.assertIsNone(tablet)

        tablet = self._test_get_guest_usb_tablet(
            True, True, "foo", image_meta=image_meta)
        self.assertIsNone(tablet)

        tablet = self._test_get_guest_usb_tablet(
            False, True, vm_mode.HVM, True, image_meta=image_meta)
        self.assertIsNone(tablet)

    def test_get_guest_usb_tablet_image_meta_no_vnc(self):
        self.flags(use_usb_tablet=False, group='libvirt')
        self.flags(pointer_model=None)

        image_meta = {"properties": {"hw_pointer_model": "usbtablet"}}
        # Image demands a USB tablet but no graphics console is enabled
        # to host it -> must raise.
        self.assertRaises(
            exception.UnsupportedPointerModelRequested,
            self._test_get_guest_usb_tablet,
            False, False, vm_mode.HVM, True, image_meta=image_meta)

    def test_get_guest_no_pointer_model_usb_tablet_set(self):
self.flags(use_usb_tablet=True, group='libvirt') self.flags(pointer_model=None) tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNotNone(tablet) def test_get_guest_no_pointer_model_usb_tablet_not_set(self): self.flags(use_usb_tablet=False, group='libvirt') self.flags(pointer_model=None) tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNone(tablet) def test_get_guest_pointer_model_usb_tablet(self): self.flags(use_usb_tablet=False, group='libvirt') self.flags(pointer_model='usbtablet') tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNotNone(tablet) def test_get_guest_pointer_model_usb_tablet_image(self): image_meta = {"properties": {"hw_pointer_model": "usbtablet"}} tablet = self._test_get_guest_usb_tablet( True, True, vm_mode.HVM, image_meta=image_meta) self.assertIsNotNone(tablet) def test_get_guest_pointer_model_usb_tablet_image_no_HVM(self): self.flags(pointer_model=None) self.flags(use_usb_tablet=False, group='libvirt') image_meta = {"properties": {"hw_pointer_model": "usbtablet"}} self.assertRaises( exception.UnsupportedPointerModelRequested, self._test_get_guest_usb_tablet, True, True, vm_mode.XEN, image_meta=image_meta) def _test_get_guest_config_with_watchdog_action_flavor(self, hw_watchdog_action="hw:watchdog_action"): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], 
vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def test_get_guest_config_with_watchdog_action_through_flavor(self): self._test_get_guest_config_with_watchdog_action_flavor() # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action' # should be removed in the next release def test_get_guest_config_with_watchdog_action_through_flavor_no_scope( self): self._test_get_guest_config_with_watchdog_action_flavor( hw_watchdog_action="hw_watchdog_action") def test_get_guest_config_with_watchdog_overrides_flavor(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "pause"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) 
self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("pause", cfg.devices[7].action) def test_get_guest_config_with_video_driver_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "vmvga"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[5].type, "vnc") self.assertEqual(cfg.devices[6].type, "vmvga") def test_get_guest_config_with_qga_through_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_qemu_guest_agent": "yes"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) 
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[7].type, "unix")
        self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")

    def test_get_guest_config_with_video_driver_vram(self):
        # hw_video_ram image property sets the qxl vram size, bounded by
        # the flavor's hw_video:ram_max_mb extra spec.
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
        # 64 MiB requested, expressed in KiB in the domain config.
        self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)

    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_unmount_fs_if_error_during_lxc_create_domain(self,
            mock_get_inst_path, mock_ensure_tree, mock_setup_container,
            mock_get_info, mock_teardown):
        """If we hit an error during a `_create_domain` call to `libvirt+lxc`
        we need to ensure the guest FS is unmounted from the host so that any
        future `lvremove` calls will work.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.side_effect = exception.InstanceNotFound(
                                                   instance_id='foo')
        # Force the domain definition itself to blow up after the
        # container FS has been set up.
        drvr._conn.defineXML = mock.Mock()
        drvr._conn.defineXML.side_effect = ValueError('somethingbad')
        with test.nested(
                mock.patch.object(drvr, '_is_booted_from_volume',
                                  return_value=False),
                mock.patch.object(drvr, 'plug_vifs'),
                mock.patch.object(drvr, 'firewall_driver'),
                mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(ValueError,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              mock_instance, None, None)

            mock_teardown.assert_called_with(container_dir='/tmp/rootfs')

    def test_video_driver_flavor_limit_not_set(self):
        # Requesting video RAM without a flavor limit must be rejected.
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)

    def test_video_driver_ram_above_flavor_limit(self):
        # Requested video RAM (64) above the flavor cap (50) must raise.
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        instance_ref = objects.Instance(**self.test_instance)
        instance_type = instance_ref.get_flavor()
        instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)

    def test_get_guest_config_without_qga_through_image_meta(self):
        # hw_qemu_guest_agent=no: no agent channel device is added.
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_qemu_guest_agent": "no"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")

    def test_get_guest_config_with_rng_device(self):
        # RNG device is added when the flavor allows it and the image
        # requests the virtio model; defaults: 'random', no rate limits.
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(pointer_model='ps2mouse')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertIsNone(cfg.devices[6].backend)
        self.assertIsNone(cfg.devices[6].rate_bytes)
        self.assertIsNone(cfg.devices[6].rate_period)

    def test_get_guest_config_with_rng_not_allowed(self):
        # Image requests a RNG but the flavor does not allow it: no RNG
        # device in the resulting config.
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(pointer_model='ps2mouse')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

    def test_get_guest_config_with_rng_limits(self):
        # RNG rate-limit extra specs are carried into the device config.
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(pointer_model='ps2mouse')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
                                           'hw_rng:rate_bytes': '1024',
                                           'hw_rng:rate_period': '2'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertIsNone(cfg.devices[6].backend)
        self.assertEqual(cfg.devices[6].rate_bytes, 1024)
        self.assertEqual(cfg.devices[6].rate_period, 2)

    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_backend(self, mock_path):
        # rng_dev_path that exists on the host becomes the RNG backend.
        self.flags(virt_type='kvm',
                   rng_dev_path='/dev/hw_rng', group='libvirt')
        self.flags(pointer_model='ps2mouse')
        mock_path.return_value = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
        self.assertIsNone(cfg.devices[6].rate_bytes)
        self.assertIsNone(cfg.devices[6].rate_period)

    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
        # Configured rng_dev_path missing on the host must raise.
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   rng_dev_path='/dev/hw_rng', group='libvirt')
        mock_path.return_value = False

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(exception.RngDeviceNotExist,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta, disk_info)

    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_guest_cpu_shares_with_multi_vcpu(self, is_able):
        # Default cpu shares scale with vcpu count (1024 per vcpu).
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 4
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(4096, cfg.cputune.shares)

    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_with_cpu_quota(self, is_able):
        # quota:* extra specs are copied into the cputune element.
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
                                           'quota:cpu_period': '20000'}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(10000, cfg.cputune.shares)
        self.assertEqual(20000, cfg.cputune.period)

    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
        # Non-numeric quota values fail fast with ValueError.
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
                                           'quota:cpu_period': '20000'}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        self.assertRaises(ValueError,
                          drvr._get_guest_config,
                          instance_ref, [], image_meta, disk_info)

    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=False)
    def test_get_update_guest_cputune(self, is_able):
        # Host without cgroup cpu control cannot honour quota extra specs.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
                                           'quota:cpu_period': '20000'}
        self.assertRaises(
            exception.UnsupportedHostCPUControlPolicy,
            drvr._update_guest_cputune, {}, instance_ref.flavor, "kvm")

    def _test_get_guest_config_sysinfo_serial(self, expected_serial):
        # Helper: build the SMBIOS sysinfo config and verify every field,
        # with the serial expected to equal expected_serial.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)

        cfg = drvr._get_guest_config_sysinfo(instance_ref)

        self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
        self.assertEqual(version.vendor_string(),
                         cfg.system_manufacturer)
        self.assertEqual(version.product_string(),
                         cfg.system_product)
        self.assertEqual(version.version_string_with_package(),
                         cfg.system_version)
        self.assertEqual(expected_serial,
                         cfg.system_serial)
        self.assertEqual(instance_ref['uuid'],
                         cfg.system_uuid)
        self.assertEqual("Virtual Machine",
                         cfg.system_family)

    def test_get_guest_config_sysinfo_serial_none(self):
        self.flags(sysinfo_serial="none", group="libvirt")
        self._test_get_guest_config_sysinfo_serial(None)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware")
    def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
        self.flags(sysinfo_serial="hardware", group="libvirt")

        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        mock_uuid.return_value = theuuid

        self._test_get_guest_config_sysinfo_serial(theuuid)

    @contextlib.contextmanager
    def patch_exists(self, result):
        # Patch os.path.exists so that only /etc/machine-id reports
        # `result`; everything else falls through to the real check.
        real_exists = os.path.exists

        def fake_exists(filename):
            if filename == "/etc/machine-id":
                return result
            return real_exists(filename)

        with mock.patch.object(os.path, "exists") as mock_exists:
            mock_exists.side_effect = fake_exists
            yield mock_exists

    def test_get_guest_config_sysinfo_serial_os(self):
        # sysinfo_serial=os reads the serial from /etc/machine-id.
        self.flags(sysinfo_serial="os", group="libvirt")
        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        with test.nested(
                mock.patch.object(six.moves.builtins, "open",
                                  mock.mock_open(read_data=theuuid)),
                self.patch_exists(True)):
            self._test_get_guest_config_sysinfo_serial(theuuid)

    def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self):
        # An empty /etc/machine-id is an error, not an empty serial.
        self.flags(sysinfo_serial="os", group="libvirt")
        with test.nested(
                mock.patch.object(six.moves.builtins, "open",
                                  mock.mock_open(read_data="")),
                self.patch_exists(True)):
            self.assertRaises(exception.NovaException,
                              self._test_get_guest_config_sysinfo_serial,
                              None)

    def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self):
        self.flags(sysinfo_serial="os", group="libvirt")
        with self.patch_exists(False):
            self.assertRaises(exception.NovaException,
                              self._test_get_guest_config_sysinfo_serial,
                              None)

    def test_get_guest_config_sysinfo_serial_auto_hardware(self):
        # auto falls back to the hardware serial when /etc/machine-id
        # does not exist.
        self.flags(sysinfo_serial="auto", group="libvirt")

        real_exists = os.path.exists
        with test.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_get_host_sysinfo_serial_hardware")
        ) as (mock_exists, mock_uuid):
            def fake_exists(filename):
                if filename == "/etc/machine-id":
                    return False
                return real_exists(filename)

            mock_exists.side_effect = fake_exists

            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
            mock_uuid.return_value = theuuid

            self._test_get_guest_config_sysinfo_serial(theuuid)

    def test_get_guest_config_sysinfo_serial_auto_os(self):
        # auto prefers /etc/machine-id when the file exists.
        self.flags(sysinfo_serial="auto", group="libvirt")

        real_exists = os.path.exists
        real_open = builtins.open
        with test.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(builtins, "open"),
        ) as (mock_exists, mock_open):
            def fake_exists(filename):
                if filename == "/etc/machine-id":
                    return True
                return real_exists(filename)

            mock_exists.side_effect = fake_exists

            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"

            def fake_open(filename, *args, **kwargs):
                if filename == "/etc/machine-id":
                    h = mock.MagicMock()
                    h.read.return_value = theuuid
                    h.__enter__.return_value = h
                    return h
                return real_open(filename, *args, **kwargs)

            mock_open.side_effect = fake_open

            self._test_get_guest_config_sysinfo_serial(theuuid)

    def _create_fake_service_compute(self):
        # Build matching Service/ComputeNode fixtures for the PCI
        # passthrough tests below.
        service_info = {
            'id': 1729,
            'host': 'fake',
            'report_count': 0
        }
        service_ref = objects.Service(**service_info)

        compute_info = {
            'id': 1729,
            'vcpus': 2,
            'memory_mb': 1024,
            'local_gb': 2048,
            'vcpus_used': 0,
            'memory_mb_used': 0,
            'local_gb_used': 0,
            'free_ram_mb': 1024,
            'free_disk_gb': 2048,
            'hypervisor_type': 'xen',
            'hypervisor_version': 1,
            'running_vms': 0,
            'cpu_info': '',
            'current_workload': 0,
            'service_id': service_ref['id'],
            'host': service_ref['host']
        }
        compute_ref = objects.ComputeNode(**compute_info)
        return (service_ref, compute_ref)

    def test_get_guest_config_with_pci_passthrough_kvm(self):
        # An allocated PCI device shows up as a managed hostdev on KVM.
        self.flags(virt_type='kvm', group='libvirt')
        service_ref, compute_ref = self._create_fake_service_compute()

        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.ALLOCATED,
                               address='0000:00:00.1',
                               compute_id=compute_ref.id,
                               instance_uuid=instance.uuid,
                               request_id=None,
                               extra_info={})
        pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'yes') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") self.assertEqual(dev.function, "1") self.assertEqual(had_pci, 1) def test_get_guest_config_with_pci_passthrough_xen(self): self.flags(virt_type='xen', group='libvirt') service_ref, compute_ref = self._create_fake_service_compute() instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.ALLOCATED, address='0000:00:00.2', compute_id=compute_ref.id, instance_uuid=instance.uuid, request_id=None, extra_info={}) pci_device = objects.PciDevice(**pci_device_info) pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'no') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") 
self.assertEqual(dev.function, "2") self.assertEqual(had_pci, 1) def test_get_guest_config_os_command_line_through_image_meta(self): self.flags(virt_type="kvm", cpu_mode='none', group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_cmdline, "fake_os_command_line") def test_get_guest_config_os_command_line_without_kernel_id(self): self.flags(virt_type="kvm", cpu_mode='none', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(cfg.os_cmdline) def test_get_guest_config_os_command_empty(self): self.flags(virt_type="kvm", cpu_mode='none', group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": ""}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by # default, so testing an empty string and None value in the # os_command_line image property must pass cfg = drvr._get_guest_config(instance_ref, 
_fake_network_info(self, 1), image_meta, disk_info) self.assertNotEqual(cfg.os_cmdline, "") @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_guest_storage_config") @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support") def test_get_guest_config_armv7(self, mock_numa, mock_storage): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "vexpress-a15") @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_guest_storage_config") @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support") def test_get_guest_config_aarch64(self, mock_numa, mock_storage): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.AARCH64 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "virt") def 
test_get_guest_config_machine_type_s390(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigGuestCPU() image_meta = objects.ImageMeta.from_dict(self.test_image_meta) host_cpu_archs = (arch.S390, arch.S390X) for host_cpu_arch in host_cpu_archs: caps.host.cpu.arch = host_cpu_arch os_mach_type = drvr._get_machine_type(image_meta, caps) self.assertEqual('s390-ccw-virtio', os_mach_type) def test_get_guest_config_machine_type_through_image_meta(self): self.flags(virt_type="kvm", group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_machine_type": "fake_machine_type"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") def test_get_guest_config_machine_type_from_config(self): self.flags(virt_type='kvm', group='libvirt') self.flags(hw_machine_type=['x86_64=fake_machine_type'], group='libvirt') def fake_getCapabilities(): return """ <capabilities> <host> <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid> <cpu> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <topology sockets='1' cores='2' threads='1'/> <feature name='xtpr'/> </cpu> </host> </capabilities> """ def fake_baselineCPU(cpu, flag): return """<cpu mode='custom' match='exact'> <model fallback='allow'>Penryn</model> <vendor>Intel</vendor> <feature policy='require' name='xtpr'/> </cpu> """ # Make sure the host arch is mocked as x86_64 self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities, baselineCPU=fake_baselineCPU, getVersion=lambda: 1005001) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) 
        # (continuation) remainder of
        # test_get_guest_config_machine_type_from_config: the configured
        # machine type must win for the mocked x86_64 host.
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_mach_type, "fake_machine_type")

    def _test_get_guest_config_ppc64(self, device_index):
        """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
        """
        # On ppc/ppc64 guests the video device at device_index must be VGA.
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        expected = (arch.PPC64, arch.PPC)
        for guestarch in expected:
            with mock.patch.object(libvirt_driver.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                cfg = drvr._get_guest_config(instance_ref, [],
                                             image_meta,
                                             disk_info)
                self.assertIsInstance(cfg.devices[device_index],
                                      vconfig.LibvirtConfigGuestVideo)
                self.assertEqual(cfg.devices[device_index].type, 'vga')

    def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
        """With VNC enabled the video device sits at index 6."""
        self.flags(enabled=True, group='vnc')
        self._test_get_guest_config_ppc64(6)

    def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
        """With SPICE (and its agent) enabled the video device is index 8."""
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        self._test_get_guest_config_ppc64(8)

    def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
        """Common check that the generated guest config enables os_bootmenu."""
        self.flags(virt_type='kvm', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = extra_specs
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertTrue(conf.os_bootmenu)

    def test_get_guest_config_bootmenu_via_image_meta(self):
        """hw_boot_menu image property turns the boot menu on."""
        image_meta = objects.ImageMeta.from_dict(
            {"disk_format": "raw",
             "properties": {"hw_boot_menu": "True"}})
        self._test_get_guest_config_bootmenu(image_meta, {})

    def test_get_guest_config_bootmenu_via_extra_specs(self):
        """hw:boot_menu flavor extra spec turns the boot menu on."""
        image_meta = objects.ImageMeta.from_dict(
            self.test_image_meta)
        self._test_get_guest_config_bootmenu(image_meta,
                                             {'hw:boot_menu': 'True'})

    def test_get_guest_cpu_config_none(self):
        """cpu_mode='none' still yields a CPU element with bare topology."""
        self.flags(cpu_mode="none", group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertIsNone(conf.cpu.mode)
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_default_kvm(self):
        """KVM with cpu_mode='none': CPU element present, no mode/model."""
        self.flags(virt_type="kvm",
                   cpu_mode='none',
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertIsNone(conf.cpu.mode)
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_default_uml(self):
        """UML guests get no CPU element at all."""
        self.flags(virt_type="uml",
                   cpu_mode='none',
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsNone(conf.cpu)

    def test_get_guest_cpu_config_default_lxc(self):
        """LXC guests get no CPU element at all."""
        self.flags(virt_type="lxc",
                   cpu_mode='none',
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsNone(conf.cpu)

    def test_get_guest_cpu_config_host_passthrough(self):
        """cpu_mode='host-passthrough' is propagated; no explicit model."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="host-passthrough", group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-passthrough")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_host_model(self):
        """cpu_mode='host-model' is propagated; no explicit model."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="host-model", group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
        # (continuation) closing argument of the get_disk_info() call opened
        # on the previous line (test_get_guest_cpu_config_host_model).
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_custom(self):
        """cpu_mode='custom' uses the configured cpu_model verbatim."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="custom",
                   cpu_model="Penryn",
                   group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "custom")
        self.assertEqual(conf.cpu.model, "Penryn")
        self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_topology(self):
        """hw:cpu_max_sockets caps sockets; cores absorb remaining vcpus."""
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 8
        instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertEqual(conf.cpu.sockets, 4)
        self.assertEqual(conf.cpu.cores, 2)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_memory_balloon_config_by_default(self):
        """Default config: virtio memballoon with a 10s stats period."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(10, device.period)

    def test_get_guest_memory_balloon_config_disable(self):
        """mem_stats_period_seconds=0 removes the memballoon entirely."""
        self.flags(mem_stats_period_seconds=0, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        no_exist = True
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                no_exist = False
                break
        self.assertTrue(no_exist)

    def test_get_guest_memory_balloon_config_period_value(self):
        """A custom mem_stats_period_seconds flows into device.period."""
        self.flags(mem_stats_period_seconds=21, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(21, device.period)

    def test_get_guest_memory_balloon_config_qemu(self):
        """qemu guests use the virtio memballoon model."""
        self.flags(virt_type='qemu', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(10, device.period)

    def test_get_guest_memory_balloon_config_xen(self):
        """xen guests use the xen memballoon model."""
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('xen', device.model)
                self.assertEqual(10, device.period)

    def test_get_guest_memory_balloon_config_lxc(self):
        """LXC guests get no memballoon device."""
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        no_exist = True
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                no_exist = False
                break
        self.assertTrue(no_exist)

    @mock.patch('nova.virt.libvirt.driver.LOG.warning')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_get_supported_perf_events_foo(self, mock_get_caps,
                                           mock_min_version,
                                           mock_warn):
        """An unknown perf event name is dropped and logged as a warning."""
        self.flags(enabled_perf_events=['foo'], group='libvirt')

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        # (continuation) remainder of test_get_supported_perf_events_foo:
        # no events survive and a warning must have been logged.
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        mock_get_caps.return_value = caps

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        events = drvr._get_supported_perf_events()

        self.assertTrue(mock_warn.called)
        self.assertEqual([], events)

    @mock.patch.object(host.Host, "get_capabilities")
    def _test_get_guest_with_perf(self, caps, events, mock_get_caps):
        """Common check that the guest config carries the expected events."""
        mock_get_caps.return_value = caps

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host('test_perf')
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(events, cfg.perf_events)

    @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True,
                       create=True)
    @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBMT', True,
                       create=True)
    @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBML', True,
                       create=True)
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_get_guest_with_perf_supported(self,
                                 mock_min_version):
        """All requested events pass through when host CPU supports them."""
        self.flags(enabled_perf_events=['cmt', 'mbml', 'mbmt'],
                   group='libvirt')
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        features = []
        for f in ('cmt', 'mbm_local', 'mbm_total'):
            feature = vconfig.LibvirtConfigGuestCPUFeature()
            feature.name = f
            feature.policy = cpumodel.POLICY_REQUIRE
            features.append(feature)

        caps.host.cpu.features = set(features)

        self._test_get_guest_with_perf(caps, ['cmt', 'mbml', 'mbmt'])

    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_with_perf_libvirt_unsupported(self, mock_min_version):
        """Events are dropped when libvirt predates the perf API."""

        def fake_has_min_version(lv_ver=None, hv_ver=None, hv_type=None):
            # Report everything supported except the perf version check.
            if lv_ver == libvirt_driver.MIN_LIBVIRT_PERF_VERSION:
                return False
            return True

        mock_min_version.side_effect = fake_has_min_version
        self.flags(enabled_perf_events=['cmt'], group='libvirt')

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"

        self._test_get_guest_with_perf(caps, [])

    @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True,
                       create=True)
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_get_guest_with_perf_host_unsupported(self,
                                             mock_min_version):
        """Events are dropped when the host CPU lacks the feature."""
        self.flags(enabled_perf_events=['cmt'], group='libvirt')
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        self._test_get_guest_with_perf(caps, [])

    def test_xml_and_uri_no_ramdisk_no_kernel(self):
        """Image-booted guest: neither kernel nor ramdisk in the XML."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)

    def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
        """Xen HVM guest without kernel/ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.HVM})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=True)

    def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
        """Xen PV guest without kernel/ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.XEN})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=False,
                                xen_only=True)

    def test_xml_and_uri_no_ramdisk(self):
        """Kernel only: XML advertises the kernel but no ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=False)

    def test_xml_and_uri_no_kernel(self):
        """Ramdisk without a kernel is ignored: neither ends up in XML."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)

    def test_xml_and_uri(self):
        """Kernel and ramdisk both present and both expected in XML."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=True)

    def test_xml_and_uri_rescue(self):
        """Rescue boot with kernel and ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=True, rescue=instance_data)

    def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
        """Rescue boot without kernel or ramdisk."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, rescue=instance_data)

    def test_xml_and_uri_rescue_no_kernel(self):
        """Rescue boot with a ramdisk but no kernel."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=True, rescue=instance_data)

    def test_xml_and_uri_rescue_no_ramdisk(self):
        """Rescue boot with a kernel but no ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=False, rescue=instance_data)

    def test_xml_uuid(self):
        """The instance UUID must appear in the generated XML."""
        self._check_xml_and_uuid(self.test_image_meta)

    def test_lxc_container_and_uri(self):
        """LXC container XML and connection URI checks."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)

    def test_xml_disk_prefix(self):
        """Default (unset) disk prefix per virt type."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, None)

    def test_xml_user_specified_disk_prefix(self):
        """User-configured 'sd' disk prefix wins over the default."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, 'sd')

    def test_xml_disk_driver(self):
        """Disk driver element checks."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_driver(instance_data)

    def test_xml_disk_bus_virtio(self):
        """Raw disk image: virtio bus, device vda."""
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        self._check_xml_and_disk_bus(image_meta,
                                     None,
                                     (("disk", "virtio", "vda"),))

    def test_xml_disk_bus_ide(self):
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda"),
                    arch.PPC64LE: ("cdrom", "scsi", "sda"),
                    arch.AARCH64: ("cdrom", "scsi", "sda")}

        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     None,
                                     (expec_val,))

    def test_xml_disk_bus_ide_and_virtio(self):
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda"),
                    arch.PPC64LE: ("cdrom", "scsi", "sda"),
                    arch.AARCH64: ("cdrom", "scsi", "sda")}

        swap = {'device_name': '/dev/vdc',
                'swap_size': 1}
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'size': 1}]
        block_device_info = {
                'swap': swap,
                'ephemerals': ephemerals}
        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     block_device_info,
                                     (expec_val,
                                      ("disk", "virtio", "vdb"),
                                      ("disk", "virtio", "vdc")))

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instances(self, mock_list):
        """list_instances() returns the names of all guest domains."""
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        names = drvr.list_instances()
        self.assertEqual(names[0], vm1.name())
        self.assertEqual(names[1], vm2.name())
        self.assertEqual(names[2], vm3.name())
        self.assertEqual(names[3], vm4.name())
        mock_list.assert_called_with(only_guests=True,
        # (continuation) closing argument of the assert_called_with() begun
        # on the previous line (test_list_instances).
                                     only_running=False)

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instance_uuids(self, mock_list):
        """list_instance_uuids() returns the UUIDs of all guest domains."""
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        uuids = drvr.list_instance_uuids()
        self.assertEqual(len(uuids), 4)
        self.assertEqual(uuids[0], vm1.UUIDString())
        self.assertEqual(uuids[1], vm2.UUIDString())
        self.assertEqual(uuids[2], vm3.UUIDString())
        self.assertEqual(uuids[3], vm4.UUIDString())
        mock_list.assert_called_with(only_guests=True,
                                     only_running=False)

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus(self, get_online_cpus):
        """Only CPUs in vcpu_pin_set AND online count toward the total."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-5")
        get_online_cpus.return_value = set([4, 5, 6])
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_out_of_range(self, get_online_cpus):
        """A pinned CPU that is not online raises Invalid."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.return_value = set([4, 5])
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
        """A libvirt error with a pin-set range still raises Invalid."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.side_effect = not_supported_exc
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
        """A libvirt error with a single pinned CPU still yields a count."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="1")
        get_online_cpus.side_effect = not_supported_exc
        expected_vcpus = 1
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
    def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
        """The vCPU total is re-read, so CPU hotplug is reflected."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        get_cpu_count.return_value = 2
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
        get_cpu_count.return_value = 3
        expected_vcpus = 3
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_quiesce(self, mock_has_min_version):
        """quiesce() freezes the guest filesystems via the qemu agent."""
        self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            image_meta = objects.ImageMeta.from_dict(
                {"properties": {"hw_qemu_guest_agent": "yes"}})
            self.assertIsNone(drvr.quiesce(self.context, instance,
                                           image_meta))
            mock_fsfreeze.assert_called_once_with()

    def test_quiesce_not_supported(self):
        """quiesce() without guest-agent support raises."""
        self.create_fake_libvirt_mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          drvr.quiesce, self.context, instance, None)

    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_unquiesce(self, mock_has_min_version):
        """unquiesce() thaws the guest filesystems via the qemu agent."""
        self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                      lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain,
        # (continuation) target attribute for the mock.patch.object opened
        # on the previous line (test_unquiesce).
                               "fsThaw") as mock_fsthaw:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            image_meta = objects.ImageMeta.from_dict(
                {"properties": {"hw_qemu_guest_agent": "yes"}})
            self.assertIsNone(drvr.unquiesce(self.context, instance,
                                             image_meta))
            mock_fsthaw.assert_called_once_with()

    def test_create_snapshot_metadata(self):
        """Snapshot metadata carries instance ids and image formats."""
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'raw'})
        instance_data = {'kernel_id': 'kernel',
                         'project_id': 'prj_id',
                         'ramdisk_id': 'ram_id',
                         'os_type': None}
        instance = objects.Instance(**instance_data)
        img_fmt = 'raw'
        snp_name = 'snapshot_name'
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        expected = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                        },
                    'disk_format': img_fmt,
                    'container_format': 'bare',
                    }
        self.assertEqual(ret, expected)

        # simulate an instance with os_type field defined
        # disk format equals to ami
        # container format not equals to bare
        instance['os_type'] = 'linux'
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'ami',
             'container_format': 'test_container'})
        expected['properties']['os_type'] = instance['os_type']
        expected['disk_format'] = base.disk_format
        expected['container_format'] = base.container_format
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        self.assertEqual(ret, expected)

    def test_get_volume_driver(self):
        """A known driver_volume_type resolves to its volume driver."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        driver = conn._get_volume_driver(connection_info)
        result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
        self.assertTrue(result)

    def test_get_volume_driver_unknown(self):
        """An unknown driver_volume_type raises VolumeDriverNotFound."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'unknown',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        self.assertRaises(
            exception.VolumeDriverNotFound,
            conn._get_volume_driver,
            connection_info
        )

    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver,
                       'connect_volume')
    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
    def test_get_volume_config(self, get_config, connect_volume):
        """_get_volume_config delegates to the volume driver's get_config."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus', 'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_config = mock.MagicMock()

        get_config.return_value = mock_config
        config = drvr._get_volume_config(connection_info, disk_info)
        get_config.assert_called_once_with(connection_info, disk_info)
        self.assertEqual(mock_config, config)

    def test_attach_invalid_volume_type(self):
        """attach_volume with an unknown volume type raises."""
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.VolumeDriverNotFound,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "badtype"},
                          instance,
                          "/dev/sda")

    def test_attach_blockio_invalid_hypervisor(self):
        """Block-IO tuning is rejected on the LXC hypervisor."""
        self.flags(virt_type='lxc', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidHypervisorType,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                           },
                          instance,
                          "/dev/sda")

    def _test_check_discard(self,
                            # (continuation) parameters of the
                            # _test_check_discard signature opened above.
                            mock_log, driver_discard=None, bus=None,
                            should_log=False):
        # Common check: whether the driver logs about discard support for a
        # given disk config (driver_discard setting and target bus).
        mock_config = mock.Mock()
        mock_config.driver_discard = driver_discard
        mock_config.target_bus = bus
        mock_instance = mock.Mock()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._check_discard_for_attach_volume(mock_config, mock_instance)

        self.assertEqual(should_log, mock_log.called)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_no_unmap(self, mock_log):
        """No discard requested: nothing is logged."""
        self._test_check_discard(mock_log, driver_discard=None,
                                 bus='scsi', should_log=False)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_blk_controller(self, mock_log):
        """Discard on a virtio (blk) controller: log a debug message."""
        self._test_check_discard(mock_log, driver_discard='unmap',
                                 bus='virtio', should_log=True)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_valid_controller(self, mock_log):
        """Discard on a scsi controller is fine: nothing is logged."""
        self._test_check_discard(mock_log, driver_discard='unmap',
                                 bus='scsi', should_log=False)

    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_blk_controller_no_unmap(self,
                                                                 mock_log):
        """virtio controller but no discard requested: nothing is logged."""
        self._test_check_discard(mock_log, driver_discard=None,
                                 bus='virtio', should_log=False)

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_attach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain, mock_get_info, get_image):
        """attach_volume uses AFFECT_CONFIG|AFFECT_LIVE for live guests."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        get_image.return_value = image_meta
        mock_dom = mock.MagicMock()
        mock_get_domain.return_value = mock_dom

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        # (continuation) body of
        # test_attach_volume_with_vir_domain_affect_live_flag.
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()

        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with test.nested(
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode'),
            mock.patch.object(drvr, '_check_discard_for_attach_volume')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode, mock_check_discard):
            # Both RUNNING and PAUSED guests are "live" for attach purposes.
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]

                drvr.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])

                mock_get_domain.assert_called_with(instance)
                mock_get_info.assert_called_with(
                    instance, CONF.libvirt.virt_type,
                    test.MatchType(objects.ImageMeta), bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags=flags)
                mock_check_discard.assert_called_with(mock_conf, instance)

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain):
        """detach_volume uses AFFECT_CONFIG|AFFECT_LIVE for live guests."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml_with_disk = """<domain>
  <devices>
    <disk type='file'>
      <source file='/path/to/fake-volume'/>
      <target dev='vdc' bus='virtio'/>
    </disk>
  </devices>
</domain>"""
        mock_xml_without_disk = """<domain>
  <devices>
  </devices>
</domain>"""
        mock_dom = mock.MagicMock()

        # Second time don't return anything about disk vdc so it looks removed
        return_list = [mock_xml_with_disk, mock_xml_without_disk]
        # Doubling the size of return list because we test with two guest power
        # states
        mock_dom.XMLDesc.side_effect = return_list + return_list
        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with mock.patch.object(drvr, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                # fake virDomainGetInfo() result for the desired power state
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                mock_get_domain.return_value = mock_dom
                drvr.detach_volume(connection_info, instance, '/dev/vdc')

                mock_get_domain.assert_called_with(instance)
                mock_dom.detachDeviceFlags.assert_called_with(
                    """<disk type="file" device="disk">
  <source file="/path/to/fake-volume"/>
  <target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_disk_not_found(self, mock_get_domain):
        # If the disk is not present in the domain XML, detach_volume must
        # raise DiskNotFound rather than silently succeed.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml_without_disk = """<domain>
  <devices>
  </devices>
</domain>"""
        mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}

        mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
                                      5678]
        mock_get_domain.return_value = mock_dom
        self.assertRaises(exception.DiskNotFound, drvr.detach_volume,
                          connection_info, instance, '/dev/vdc')

        mock_get_domain.assert_called_once_with(instance)

    def test_multi_nic(self):
        # A guest built with two NICs must expose two bridge-type interface
        # elements in the generated domain XML.
        network_info = _fake_network_info(self, 2)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info,
                                  image_meta)
        tree = etree.fromstring(xml)
        interfaces = tree.findall("./devices/interface")
        self.assertEqual(len(interfaces), 2)
        self.assertEqual(interfaces[0].get('type'), 'bridge')

    def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
                                   exc=ValueError()):
        # Record one round of mox expectations for a _supports_direct_io()
        # probe, optionally making the stubbed os.open/os.write raise `exc`.
        open_behavior = os.open(os.path.join('.', '.directio.test'),
                                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
        if raise_open:
            open_behavior.AndRaise(exc)
        else:
            open_behavior.AndReturn(3)
            write_bahavior = os.write(3, mox.IgnoreArg())
            if raise_write:
                write_bahavior.AndRaise(exc)
            # ensure unlink(filepath) will actually remove the file by deleting
            # the remaining link to it in close(fd)
            os.close(3)
        os.unlink(3)

    def test_supports_direct_io(self):
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the code-path
        # by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the attribute
            # directly.
            os.O_DIRECT = 16384
            self.addCleanup(delattr, os, 'O_DIRECT')

        einval = OSError()
        einval.errno = errno.EINVAL
        self.mox.StubOutWithMock(os, 'open')
        self.mox.StubOutWithMock(os, 'write')
        self.mox.StubOutWithMock(os, 'close')
        self.mox.StubOutWithMock(os, 'unlink')
        _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io

        # Record five probe rounds: success, write failure, open failure,
        # then the two EINVAL variants which mean "no direct I/O support".
        self._behave_supports_direct_io()
        self._behave_supports_direct_io(raise_write=True)
        self._behave_supports_direct_io(raise_open=True)
        self._behave_supports_direct_io(raise_write=True, exc=einval)
        self._behave_supports_direct_io(raise_open=True, exc=einval)

        self.mox.ReplayAll()
        self.assertTrue(_supports_direct_io('.'))
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertFalse(_supports_direct_io('.'))
        self.assertFalse(_supports_direct_io('.'))
        self.mox.VerifyAll()

    def _check_xml_and_container(self, instance):
        # Build guest XML for an LXC container and verify container-specific
        # elements: 'exe' os type and a filesystem passthrough target of '/'.
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        self.assertEqual(drvr._uri(), 'lxc:///')

        network_info = _fake_network_info(self, 1)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info,
                                  image_meta)
        tree = etree.fromstring(xml)

        check = [
            (lambda t: t.find('.').get('type'), 'lxc'),
            (lambda t: t.find('./os/type').text, 'exe'),
            (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]

        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s failed common check %d' % (xml, i))

        # the source dir must be non-empty (the container rootfs path)
        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertGreater(len(target), 0)

    def _check_xml_and_disk_prefix(self, instance, prefix):
        # Verify the first disk's device name per virt type, honouring an
        # optional disk_prefix config override ('<prefix>a' when set).
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        def _get_prefix(p, default):
            if p:
                return p + 'a'
            return default

        type_disk_map = {
            'qemu': [
                (lambda t: t.find('.').get('type'), 'qemu'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'xen': [
                (lambda t: t.find('.').get('type'), 'xen'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'xvda'))],
            'kvm': [
                (lambda t: t.find('.').get('type'), 'kvm'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'uml': [
                (lambda t: t.find('.').get('type'), 'uml'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'ubda'))]
            }

        for (virt_type, checks) in six.iteritems(type_disk_map):
            self.flags(virt_type=virt_type, group='libvirt')
            if prefix:
                self.flags(disk_prefix=prefix, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            network_info = _fake_network_info(self, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta)

            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info,
                                      image_meta)
            tree = etree.fromstring(xml)

            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))

    def _check_xml_and_disk_driver(self, image_meta):
        # With direct I/O available, every disk driver element must use
        # cache mode 'none'; without it the driver must fall back to
        # 'writethrough'.
        os_open = os.open
        directio_supported = True

        def os_open_stub(path, flags, *args, **kwargs):
            # Simulate a filesystem without O_DIRECT support by raising
            # EINVAL, otherwise strip the flag and delegate to the real open.
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)

        self.stub_out('os.open', os_open_stub)

        @staticmethod
        def connection_supports_direct_io_stub(dirpath):
            return directio_supported

        self.stubs.Set(libvirt_driver.LibvirtDriver,
            '_supports_direct_io', connection_supports_direct_io_stub)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")

        directio_supported = False

        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writethrough")

    def _check_xml_and_disk_bus(self, image_meta, block_device_info,
                                wantConfig):
        # Generate guest XML with the given block_device_info and verify each
        # disk's (device type, bus, dev name) triple against wantConfig.
        instance_ref = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta,
                                 block_device_info=block_device_info)
        tree = etree.fromstring(xml)

        got_disks = tree.findall('./devices/disk')
        got_disk_targets = tree.findall('./devices/disk/target')
        for i in range(len(wantConfig)):
            want_device_type = wantConfig[i][0]
            want_device_bus = wantConfig[i][1]
            want_device_dev = wantConfig[i][2]

            got_device_type = got_disks[i].get('device')
            got_device_bus = got_disk_targets[i].get('bus')
            got_device_dev = got_disk_targets[i].get('dev')

            self.assertEqual(got_device_type, want_device_type)
            self.assertEqual(got_device_bus, want_device_bus)
            self.assertEqual(got_device_dev, want_device_dev)

    def _check_xml_and_uuid(self, image_meta):
        # The generated domain XML must carry the instance's uuid.
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        self.assertEqual(tree.find('./uuid').text,
                         instance_ref['uuid'])

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False, xen_only=False):
        # Build per-virt-type (uri, xml-check) expectations and verify the
        # generated guest XML and connection URI for each hypervisor type.
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        xen_vm_mode = vm_mode.XEN
        if expect_xen_hvm:
            xen_vm_mode = vm_mode.HVM

        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.UML)]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text,
                               xen_vm_mode)])}

        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']

        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)

            if expect_kernel:
                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM and Xen
            # should not produce configuration that results in kernel
            # arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm', 'xen']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)

            if expect_ramdisk:
                check = (lambda t: self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                # sysinfo entries are emitted in a fixed order:
                # manufacturer, product, version, serial, uuid
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                        'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                        'type'), 'pty')
                check_list.append(check)
                check = (lambda t: self.relpath(t.findall(
                    './devices/serial/source')[0].get('path')).
                    split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                        'type'), 'pty')
                check_list.append(check)

        # Checks applied to every hypervisor's XML, plus rescue-dependent
        # expectations on the disk source file names.
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                    get('file')).split('/')[1], 'disk.rescue'),
                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                    get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                               'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                               'disk.local')]

        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')

            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

                self.assertEqual(drvr._uri(), expected_uri)

                network_info = _fake_network_info(self, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)

                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta,
                                          rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))

                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))

                # the interface filterref must point at the per-NIC firewall
                # filter name generated by NWFilterFirewall
                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].lower().replace(':', '')
                fw = firewall.NWFilterFirewall(drvr)
                instance_filter_name = fw._instance_filter_name(instance_ref,
                                                                nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.connection_uri
        # and checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEqual(drvr._uri(), testuri)

    def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_rules_for_instance() finishes with timeout.
        # Preparing mocks
        def fake_none(self, *args):
            return

        class FakeTime(object):
            # Accumulates the total time "slept" so the timeout path can be
            # asserted without real delays.
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        def fake_sleep(t):
            fake_timer.sleep(t)

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)

        # Start test
        self.mox.ReplayAll()
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            self.stubs.Set(greenthread,
                           'sleep',
                           fake_sleep)
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
        # c1 is only bound when the expected exception was raised
        self.assertTrue(c1)

        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_block_migration(
            self, mock_cpu, mock_test_file, mock_svc):
        # With a compatible CPU and shared storage, block migration to the
        # destination must be accepted and the legacy dict fully populated.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1

        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
                                                               instance_ref,
                                                               None,
                                                               compute_info,
                                                               True)
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         'image_type': 'default',
                         'disk_available_mb': 409600,
                         "disk_over_commit": False,
                         "block_migration": True,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_no_block_migration(
            self, mock_cpu, mock_test_file, mock_svc):
        # Same as the block-migration variant above but with
        # block_migration=False reflected in the returned data.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1

        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
                                                               instance_ref,
                                                               None,
                                                               compute_info,
                                                               False)
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 409600,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
    def test_check_can_live_migrate_guest_cpu_none_model(
            self, mock_cpu, mock_test_file):
        # Tests that when instance.vcpu_model.model is None, the host cpu
        # model is used for live migration.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        instance_ref.vcpu_model.model = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
        result = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info)
        result.is_volume_backed = False
        # None as the guest CPU means _compare_cpu falls back to host info
        mock_cpu.assert_called_once_with(None, 'asdf', instance_ref)
        expected_result = {"filename": 'fake',
                           "image_type": CONF.libvirt.images_type,
                           "block_migration": False,
                           "disk_over_commit": False,
                           "disk_available_mb": 1024,
                           "is_volume_backed": False}
        self.assertEqual(expected_result, result.to_legacy_dict())

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_no_instance_cpu_info(
            self, mock_cpu, mock_test_file, mock_svc):
        # When the instance carries no vcpu_model, the source host's
        # serialized cpu_info is used for the compatibility check.
        instance_ref = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': jsonutils.dumps({
            "vendor": "AMD",
            "arch": arch.I686,
            "features": ["sse3"],
            "model": "Opteron_G3",
            "topology": {"cores": 2, "threads": 1, "sockets": 4}
        }), 'disk_available_least': 1}
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1

        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        return_value = drvr.check_can_live_migrate_destination(self.context,
                                                               instance_ref,
                                                               compute_info,
                                                               compute_info,
                                                               False)
        # NOTE(danms): Compute manager would have set this, so set it here
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 1024,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_incompatible_cpu_raises(
            self, mock_cpu, mock_svc):
        # An InvalidCPUInfo from the CPU comparison must propagate out of
        # check_can_live_migrate_destination.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}

        mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
        self.assertRaises(exception.InvalidCPUInfo,
                          drvr.check_can_live_migrate_destination,
                          self.context, instance_ref,
                          compute_info, compute_info, False)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_compatible_host_cpu(self, mock_vconfig,
                                             mock_compare):
        # A positive comparison result means compatible -> returns None.
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 5
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
                                instance)
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_handles_not_supported_error_gracefully(self,
                                                                mock_vconfig,
                                                                mock_compare):
        # VIR_ERR_NO_SUPPORT from virCompareCPU must not fail the check.
        instance = objects.Instance(**self.test_instance)
        not_supported_exc = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                'this function is not supported by the connection driver:'
                ' virCompareCPU',
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        mock_compare.side_effect = not_supported_exc
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
                                instance)
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt.LibvirtDriver,
                       '_vcpu_model_to_cpu_config')
    def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
                                              mock_compare):
        # A guest CPU spec (first argument) with a positive comparison
        # result is compatible -> returns None.
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 6
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None,
                                instance)
        self.assertIsNone(ret)

    def test_compare_cpu_virt_type_xen(self):
        # With virt_type xen the CPU comparison is skipped entirely.
        instance = objects.Instance(**self.test_instance)
        self.flags(virt_type='xen', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, None, instance)
        self.assertIsNone(ret)

    def test_compare_cpu_virt_type_qemu(self):
        # With virt_type qemu the CPU comparison is skipped entirely.
        instance = objects.Instance(**self.test_instance)
        self.flags(virt_type='qemu', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, None, instance)
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
                                                mock_compare):
        # A zero comparison result means incompatible CPU info.
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 0
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidCPUInfo,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info),
                          instance)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
                                                 mock_compare):
        # A libvirtError from the comparison is wrapped in
        # MigrationPreCheckError.
        instance = objects.Instance(**self.test_instance)
        mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationPreCheckError,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info),
                          instance)

    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
        # The destination-check cleanup must remove the shared storage
        # test file named in the migrate data.
        objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename="file",
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=1024)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
        drvr._cleanup_shared_storage_test_file("file")

        self.mox.ReplayAll()
        drvr.cleanup_live_migration_destination_check(self.context,
                                                      dest_check_data)

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.utime')
    def test_check_shared_storage_test_file_exists(self, mock_utime,
                                                   mock_path_exists):
        # The check touches instances_path and reports the file's presence.
        tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr._check_shared_storage_test_file(
            'tmp123', mock.sentinel.instance))
        mock_utime.assert_called_once_with(CONF.instances_path, None)
        mock_path_exists.assert_called_once_with(tmpfile_path)

    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('os.utime')
    def test_check_shared_storage_test_file_does_not_exist(self, mock_utime,
                                                   mock_path_exists):
        # Absent test file -> storage is not shared.
        tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr._check_shared_storage_test_file(
            'tmp123', mock.sentinel.instance))
        mock_utime.assert_called_once_with(CONF.instances_path, None)
        mock_path_exists.assert_called_once_with(tmpfile_path)

    def _mock_can_live_migrate_source(self, block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      is_booted_from_volume=False,
                                      disk_available_mb=1024,
                                      block_device_info=None,
                                      block_device_text=None):
        # Common mox setup for the check_can_live_migrate_source tests:
        # stubs the storage/boot probes and returns the actors under test.
        instance = objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=block_migration,
            disk_over_commit=False,
            disk_available_mb=disk_available_mb)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
        drvr._is_shared_block_storage(instance, dest_check_data,
                block_device_info).AndReturn(is_shared_block_storage)
        self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
        drvr._check_shared_storage_test_file('file', instance).AndReturn(
                is_shared_instance_path)
        self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
        drvr.get_instance_disk_info(instance,
                                    block_device_info=block_device_info).\
                                    AndReturn(block_device_text)
        self.mox.StubOutWithMock(drvr, '_is_booted_from_volume')
        drvr._is_booted_from_volume(instance, block_device_text).AndReturn(
            is_booted_from_volume)

        return (instance, dest_check_data, drvr)

    def test_check_can_live_migrate_source_block_migration(self):
        # Block migration with enough disk on the destination succeeds and
        # reports the shared-storage findings in the returned object.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True)

        self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
        drvr._assert_dest_node_has_enough_disk(
            self.context, instance, dest_check_data.disk_available_mb,
            False, None)

        self.mox.ReplayAll()
        ret = drvr.check_can_live_migrate_source(self.context, instance,
                                                 dest_check_data)
        self.assertIsInstance(ret, objects.LibvirtLiveMigrateData)
        self.assertIn('is_shared_block_storage', ret)
        self.assertIn('is_shared_instance_path', ret)

    def test_check_can_live_migrate_source_shared_block_storage(self):
        # Shared block storage permits a non-block migration.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_block_storage=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_shared_instance_path(self):
        # A shared instance path permits a non-block migration.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_instance_path=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_non_shared_fails(self):
        # Neither shared storage nor block migration -> InvalidSharedStorage.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source, self.context,
                          instance, dest_check_data)

    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
        # Requesting block migration on shared block storage is invalid.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_block_storage=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_shared_path_block_migration_fails(self):
        # Requesting block migration on a shared instance path is invalid.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_instance_path=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data, None)

    def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
        # Same failure as the non-shared case, exercised via the default
        # (non-block-migration) arguments.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
        # disk_available_mb=0 cannot hold the reported virt_disk_size.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                disk_available_mb=0)

        drvr.get_instance_disk_info(instance,
                                    block_device_info=None).AndReturn(
                                        '[{"virt_disk_size":2}]')

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_booted_from_volume(self):
        # A volume-backed instance with no local disks can migrate without
        # shared storage.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_booted_from_volume=True,
                block_device_text='[]')
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_booted_from_volume_with_swap(self):
        # A local swap disk makes a volume-backed migration without shared
        # storage invalid.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_booted_from_volume=True,
                block_device_text='[{"path":"disk.swap"}]')
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage', return_value=False)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file', return_value=False)
    def test_check_can_live_migrate_source_block_migration_with_bdm_error(
            self, mock_check, mock_shared_block, mock_get_bdi,
            mock_booted_from_volume, mock_has_local, mock_enough,
            mock_min_version):
        # Block migration with attached volumes while has_min_version is
        # False must be refused with MigrationPreCheckError.
        bdi = {'block_device_mapping': ['bdm']}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=100)
        self.assertRaises(exception.MigrationPreCheckError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data,
                          block_device_info=bdi)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage', return_value=False)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file', return_value=False)
    def test_check_can_live_migrate_source_bm_with_bdm_tunnelled_error(
            self, mock_check, mock_shared_block, mock_get_bdi,
            mock_booted_from_volume, mock_has_local, mock_enough,
            mock_min_version):
        # With live_migration_tunnelled enabled, block migration plus
        # attached volumes fails the pre-check even on a new-enough libvirt.
        self.flags(live_migration_tunnelled=True,
                   group='libvirt')
        bdi = {'block_device_mapping': ['bdm']}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=100)
        # Recompute migration flags so the tunnelled flag takes effect.
        drvr._parse_migration_flags()
        self.assertRaises(exception.MigrationPreCheckError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data,
                          block_device_info=bdi)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file')
    def _test_check_can_live_migrate_source_block_migration_none(
            self, block_migrate, is_shared_instance_path, is_share_block,
            mock_check, mock_shared_block, mock_get_bdi,
            mock_booted_from_volume, mock_has_local, mock_enough,
            mock_verson):
        # Helper: when block_migration is absent from the migrate data, the
        # driver must infer it from the two shared-storage probes.
        # NOTE(review): 'mock_verson' looks like a typo for 'mock_version';
        # it is only a positional mock name, so behaviour is unaffected.
        mock_check.return_value = is_shared_instance_path
        mock_shared_block.return_value = is_share_block
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            disk_over_commit=False,
            disk_available_mb=100)
        dest_check_data_ret = drvr.check_can_live_migrate_source(
            self.context, instance, dest_check_data)
        self.assertEqual(block_migrate, dest_check_data_ret.block_migration)

    def test_check_can_live_migrate_source_block_migration_none_shared1(self):
        # Shared instance path -> block migration inferred False.
        self._test_check_can_live_migrate_source_block_migration_none(
            False,
            True,
            False)

    def test_check_can_live_migrate_source_block_migration_none_shared2(self):
        # Shared block storage -> block migration inferred False.
        self._test_check_can_live_migrate_source_block_migration_none(
            False,
            False,
            True)

    def test_check_can_live_migrate_source_block_migration_none_no_share(self):
        # Nothing shared -> block migration inferred True.
        self._test_check_can_live_migrate_source_block_migration_none(
            True,
            False,
            False)

    # NOTE(review): '_assert_dest_node_has_enough_disk' is patched twice on
    # purpose here — the test below receives both 'mock_enough' and
    # 'mock_disk_check' for it and asserts on the outermost patch.
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file') def test_check_can_live_migration_source_disk_over_commit_none(self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_disk_check): mock_check.return_value = False mock_shared_block.return_value = False instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', disk_available_mb=100) drvr.check_can_live_migrate_source( self.context, instance, dest_check_data) self.assertFalse(mock_disk_check.called) def _is_shared_block_storage_test_create_mocks(self, disks): # Test data instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>{}</devices></domain>") disks_xml = '' for dsk in disks: if dsk['type'] is not 'network': disks_xml = ''.join([disks_xml, "<disk type='{type}'>" "<driver name='qemu' type='{driver}'/>" "<source {source}='{source_path}'/>" "<target dev='{target_dev}' bus='virtio'/>" "</disk>".format(**dsk)]) else: disks_xml = ''.join([disks_xml, "<disk type='{type}'>" "<driver name='qemu' type='{driver}'/>" "<source protocol='{source_proto}'" "name='{source_image}' >" "<host name='hostname' port='7000'/>" "<config file='/path/to/file'/>" "</source>" "<target dev='{target_dev}'" "bus='ide'/>".format(**dsk)]) # Preparing mocks mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.XMLDesc = mock.Mock() mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml)) mock_lookup = mock.Mock() def mock_lookup_side_effect(name): return mock_virDomain mock_lookup.side_effect = mock_lookup_side_effect mock_getsize = mock.Mock() mock_getsize.return_value = "10737418240" return (mock_getsize, mock_lookup) def test_is_shared_block_storage_rbd(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData(image_type='rbd') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) self.assertTrue(drvr._is_storage_shared_with('foo', 'bar')) def test_is_shared_block_storage_lvm(self): self.flags(images_type='lvm', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='lvm', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_qcow2(self): self.flags(images_type='qcow2', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='qcow2', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_source(self): self.flags(images_type='rbd', group='libvirt') bdi = 
{'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(is_shared_instance_path=False, is_volume_backed=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_dest(self): bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='rbd', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_volume_backed(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with mock.patch.object(host.Host, 'get_domain', mock_lookup): self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info = bdi)) mock_lookup.assert_called_once_with(instance) def 
test_is_shared_block_storage_volume_backed_with_disk(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}, {'type': 'file', 'driver': 'raw', 'source': 'file', 'source_path': '/instance/disk.local', 'target_dev': 'vdb'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with test.nested( mock.patch.object(os.path, 'getsize', mock_getsize), mock.patch.object(host.Host, 'get_domain', mock_lookup)): self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info = bdi)) mock_getsize.assert_called_once_with('/instance/disk.local') mock_lookup.assert_called_once_with(instance) def test_is_shared_block_storage_nfs(self): bdi = {'block_device_mapping': []} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_backend = mock.MagicMock() mock_image_backend.backend.return_value = mock_backend mock_backend.is_file_in_instance_path.return_value = True mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData( is_shared_instance_path=True, image_type='foo') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): self.assertTrue(drvr._is_shared_block_storage( 'instance', data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_live_migration_update_graphics_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = 
objects.Instance(**instance_dict) xml_tmpl = ("<domain type='kvm'>" "<devices>" "<graphics type='vnc' listen='{vnc}'>" "<listen address='{vnc}'/>" "</graphics>" "<graphics type='spice' listen='{spice}'>" "<listen address='{spice}'/>" "</graphics>" "</devices>" "</domain>") initial_xml = xml_tmpl.format(vnc='1.2.3.4', spice='5.6.7.8') target_xml = xml_tmpl.format(vnc='10.0.0.1', spice='10.0.0.2') target_xml = etree.tostring(etree.fromstring(target_xml)) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) guest = libvirt_guest.Guest(vdmock) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn( initial_xml) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), dxml=target_xml, flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='10.0.0.1', graphics_listen_addr_spice='10.0.0.2', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, guest, []) def test_live_migration_update_volume_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
                        'cde.67890.opst-lun-Z')
        # start test: the guest XML must be rewritten with the new volume
        # paths via libvirt_migrate.get_updated_guest_xml.
        connection_info = {
            u'driver_volume_type': u'iscsi',
            u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {
                u'access_mode': u'rw',
                u'target_discovered': False,
                u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            },
        }
        bdm = objects.LibvirtLiveMigrateBDMInfo(
            serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
            bus='virtio', type='disk', dev='vdb',
            connection_info=connection_info)
        migrate_data = objects.LibvirtLiveMigrateData(
            serial_listen_addr='',
            target_connect_addr=None,
            bdms=[bdm],
            block_migration=False)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        test_mock = mock.MagicMock()
        guest = libvirt_guest.Guest(test_mock)

        with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \
                mget_info,\
                mock.patch.object(drvr._host, 'get_domain') as mget_domain,\
                mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\
                mock.patch.object(
                    libvirt_migrate, 'get_updated_guest_xml') as mupdate:

            mget_info.side_effect = exception.InstanceNotFound(
                instance_id='foo')
            mget_domain.return_value = test_mock
            test_mock.XMLDesc.return_value = target_xml
            self.assertFalse(drvr._live_migration_operation(
                             self.context, instance_ref, 'dest', False,
                             migrate_data, guest, []))
            mupdate.assert_called_once_with(
                guest, migrate_data, mock.ANY)

    def test_live_migration_with_valid_target_connect_addr(self):
        # When target_connect_addr is set, migrateToURI2 must be pointed at
        # that address rather than the nominal destination host.
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        # start test
        connection_info = {
            u'driver_volume_type': u'iscsi',
            u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {
                u'access_mode': u'rw',
                u'target_discovered': False,
                u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            },
        }
        bdm = objects.LibvirtLiveMigrateBDMInfo(
            serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
            bus='virtio', type='disk', dev='vdb',
            connection_info=connection_info)
        migrate_data = objects.LibvirtLiveMigrateData(
            serial_listen_addr='',
            target_connect_addr='127.0.0.2',
            bdms=[bdm],
            block_migration=False)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        test_mock = mock.MagicMock()
        guest = libvirt_guest.Guest(test_mock)

        with mock.patch.object(libvirt_migrate,
                               'get_updated_guest_xml') as mupdate:
            test_mock.XMLDesc.return_value = target_xml
            drvr._live_migration_operation(self.context, instance_ref,
                                           'dest', False, migrate_data,
                                           guest, [])
            # The URI carries the explicit connect address, not 'dest'.
            test_mock.migrateToURI2.assert_called_once_with(
                'qemu+tcp://127.0.0.2/system',
                dxml=mupdate(), flags=0, bandwidth=0)

    def test_update_volume_xml(self):
        # get_updated_guest_xml rewrites the disk source path to the
        # destination's device path.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        initial_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                                 bus='virtio',
                                                 type='disk',
                                                 dev='vdb')
        bdmi.connection_info = {u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {u'access_mode': u'rw', u'target_discovered': False,
            u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}

        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = bdmi.type
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = bdmi.dev
        conf.target_bus = bdmi.bus
        conf.serial = bdmi.connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = bdmi.connection_info['data'].get('device_path')

        guest = libvirt_guest.Guest(mock.MagicMock())
        with test.nested(
                mock.patch.object(drvr, '_get_volume_config',
                                  return_value=conf),
                mock.patch.object(guest, 'get_xml_desc',
                                  return_value=initial_xml)):
            config = libvirt_migrate.get_updated_guest_xml(guest,
                objects.LibvirtLiveMigrateData(bdms=[bdmi]),
                drvr._get_volume_config)
            # Compare canonicalised XML so whitespace differences don't
            # cause false failures.
            parser = etree.XMLParser(remove_blank_text=True)
            config = etree.fromstring(config, parser)
            target_xml = etree.fromstring(target_xml, parser)
            self.assertEqual(etree.tostring(target_xml),
                             etree.tostring(config))

    def test_live_migration_uri(self):
        # Each virt_type maps to a fixed migration URI scheme; unsupported
        # types raise LiveMigrationURINotAvailable.
        hypervisor_uri_map = (
            ('xen', 'xenmigr://%s/system'),
            ('kvm', 'qemu+tcp://%s/system'),
            ('qemu', 'qemu+tcp://%s/system'),
            # anything else will return None
            ('lxc', None),
            ('parallels', None),
        )
        dest = 'destination'
        for hyperv, uri in hypervisor_uri_map:
            self.flags(virt_type=hyperv, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            if uri is not None:
                uri = uri % dest
                self.assertEqual(uri, drvr._live_migration_uri(dest))
            else:
                self.assertRaises(exception.LiveMigrationURINotAvailable,
                                  drvr._live_migration_uri,
                                  dest)

    def test_live_migration_uri_forced(self):
        # The live_migration_uri option overrides the per-hypervisor scheme.
        dest = 'destination'
        for hyperv in ('kvm', 'xen'):
            self.flags(virt_type=hyperv, group='libvirt')

            forced_uri = 'foo://%s/bar'
            self.flags(live_migration_uri=forced_uri, group='libvirt')

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertEqual(forced_uri % dest,
                             drvr._live_migration_uri(dest))

    def test_update_volume_xml_no_serial(self):
        # Disks whose XML has an empty <serial> must be left untouched by
        # the volume-XML update.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial></serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """

        initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                      'ip-1.2.3.4:3260-iqn.'
                                      'abc.12345.opst-lun-X')
        target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                     'ip-1.2.3.4:3260-iqn.'
                                     'abc.12345.opst-lun-X')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"

        connection_info = {
            u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {
                u'access_mode': u'rw', u'target_discovered': False,
                u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                u'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            },
        }

        bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                                 bus='virtio', dev='vdb',
                                                 type='disk')
        bdmi.connection_info = connection_info

        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = bdmi.type
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = bdmi.dev
        conf.target_bus = bdmi.bus
        conf.serial = bdmi.connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = bdmi.connection_info['data'].get('device_path')

        guest = libvirt_guest.Guest(mock.MagicMock())
        with test.nested(
                mock.patch.object(drvr, '_get_volume_config',
                                  return_value=conf),
                mock.patch.object(guest, 'get_xml_desc',
                                  return_value=initial_xml)):
            config = libvirt_migrate.get_updated_guest_xml(guest,
                objects.LibvirtLiveMigrateData(bdms=[bdmi]),
                drvr._get_volume_config)
            # Empty <serial> in the domain XML: the disk is not matched,
            # so the output equals the target (unchanged) XML.
            self.assertEqual(target_xml, config)

    def test_update_volume_xml_no_connection_info(self):
        # A BDM without connection_info contributes nothing; the XML must
        # come through unchanged.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        initial_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                                 dev='vdb',
                                                 type='disk',
                                                 bus='scsi',
                                                 format='qcow')
        bdmi.connection_info = {}
        conf = vconfig.LibvirtConfigGuestDisk()
        guest = libvirt_guest.Guest(mock.MagicMock())
        with test.nested(
                mock.patch.object(drvr, '_get_volume_config',
                                  return_value=conf),
                mock.patch.object(guest, 'get_xml_desc',
                                  return_value=initial_xml)):
            config = libvirt_migrate.get_updated_guest_xml(
                guest,
                objects.LibvirtLiveMigrateData(bdms=[bdmi]),
                drvr._get_volume_config)
            self.assertEqual(target_xml, config)

    @mock.patch.object(fakelibvirt.virDomain, "migrateToURI2")
    @mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
    def test_live_migration_update_serial_console_xml(self, mock_xml,
                                                      mock_migrate):
        # The serial console bind address in the migration XML must be
        # rewritten to the destination's serial_listen_addr.
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_ref = self.test_instance

        xml_tmpl = ("<domain type='kvm'>"
                    "<devices>"
                    "<console type='tcp'>"
                    "<source mode='bind' host='{addr}' service='10000'/>"
                    "</console>"
                    "</devices>"
                    "</domain>")
        initial_xml = xml_tmpl.format(addr='9.0.0.1')

        target_xml = xml_tmpl.format(addr='9.0.0.12')
        target_xml = etree.tostring(etree.fromstring(target_xml))

        # Preparing mocks
        mock_xml.return_value = initial_xml
        mock_migrate.side_effect = fakelibvirt.libvirtError("ERR")

        # start test
        bandwidth = CONF.libvirt.live_migration_bandwidth
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='10.0.0.1',
            graphics_listen_addr_spice='10.0.0.2',
            serial_listen_addr='9.0.0.12',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        dom = fakelibvirt.virDomain
        guest = libvirt_guest.Guest(dom)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, guest, [])
        mock_xml.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
        mock_migrate.assert_called_once_with(
            drvr._live_migration_uri('dest'),
            dxml=target_xml, flags=mock.ANY, bandwidth=bandwidth)

    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_fails_with_serial_console_without_migratable(self):
        # Serial console enabled but libvirt lacks VIR_DOMAIN_XML_MIGRATABLE:
        # the console address can't be rewritten, so migration must error out.
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_ref = self.test_instance

        CONF.set_override("enabled", True, "serial_console")
        dom = fakelibvirt.virDomain

        migrate_data = objects.LibvirtLiveMigrateData(
            serial_listen_addr='', target_connect_addr=None,
            block_migration=False)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, dom, [])

    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
        # Without VIR_DOMAIN_XML_MIGRATABLE the driver falls back to the
        # plain migrateToURI call (no dxml rewrite).
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        guest = libvirt_guest.Guest(vdmock)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(drvr._live_migration_uri('dest'),
                            flags=mox.IgnoreArg(),
                            bandwidth=_bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))

        # start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='0.0.0.0',
            graphics_listen_addr_spice='0.0.0.0',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        self.mox.ReplayAll()
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref,
                          'dest', False, migrate_data, guest, [])

    def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
        # No graphics listen addresses in the migrate data -> plain
        # migrateToURI path is taken.
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        guest = libvirt_guest.Guest(vdmock)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(drvr._live_migration_uri('dest'),
                            flags=mox.IgnoreArg(),
                            bandwidth=_bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))

        # start test
        migrate_data = objects.LibvirtLiveMigrateData(
            serial_listen_addr='',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        self.mox.ReplayAll()
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, guest, [])

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
    @mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
                return_value='')
    @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc',
                return_value='<xml></xml>')
    def test_live_migration_uses_migrateToURI3(
            self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
            mock_min_version):
        # New-enough libvirt: migrateToURI3 is used with a params dict
        # carrying the disks to migrate, bandwidth and destination XML.
        # Preparing mocks
        disk_paths = ['vda', 'vdb']
        params = {
            'migrate_disks': ['vda', 'vdb'],
            'bandwidth': CONF.libvirt.live_migration_bandwidth,
            'destination_xml': '',
        }
        mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")

        # Start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='0.0.0.0',
            graphics_listen_addr_spice='0.0.0.0',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        dom = fakelibvirt.virDomain
        guest = libvirt_guest.Guest(dom)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance, 'dest',
                          False, migrate_data, guest, disk_paths)
        mock_migrateToURI3.assert_called_once_with(
            drvr._live_migration_uri('dest'),
            params=params, flags=0)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
    @mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml',
                return_value='')
    @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
    def test_block_live_migration_tunnelled_migrateToURI3(
            self, mock_old_xml, mock_new_xml,
            mock_migrateToURI3, mock_min_version):
        # Tunnelled block migration: no migrate_disks in params, and the
        # tunnelled flag bits (151) are passed to migrateToURI3.
        self.flags(live_migration_tunnelled=True, group='libvirt')
        # Preparing mocks
        disk_paths = []
        params = {
            'bandwidth': CONF.libvirt.live_migration_bandwidth,
            'destination_xml': '',
        }
        # Start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='0.0.0.0',
            graphics_listen_addr_spice='0.0.0.0',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            bdms=[],
            block_migration=True)
        dom = fakelibvirt.virDomain
        guest = libvirt_guest.Guest(dom)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._parse_migration_flags()
        instance = objects.Instance(**self.test_instance)
        drvr._live_migration_operation(self.context, instance, 'dest',
                                       True, migrate_data, guest,
                                       disk_paths)
        mock_migrateToURI3.assert_called_once_with(
            drvr._live_migration_uri('dest'),
            params=params, flags=151)

    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
        # A non-wildcard VNC listen address cannot be rewritten without
        # VIR_DOMAIN_XML_MIGRATABLE, so the operation must fail.
        self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc')
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state':
power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='1.2.3.4', graphics_listen_addr_spice='1.2.3.4', serial_listen_addr='127.0.0.1', target_connect_addr=None, block_migration=False) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) def test_live_migration_raises_exception(self): # Confirms recover method is called when exceptions are raised. # Preparing data self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) guest = libvirt_guest.Guest(vdmock) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None: vdmock.migrateToURI(drvr._live_migration_uri('dest'), flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) else: vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE ).AndReturn(FakeVirtDomain().XMLDesc(flags=0)) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), dxml=mox.IgnoreArg(), flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='127.0.0.1', graphics_listen_addr_spice='127.0.0.1', serial_listen_addr='127.0.0.1', 
target_connect_addr=None,
            bdms=[],
            block_migration=False)
        self.mox.ReplayAll()
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, guest, [])
        # Instance state must be untouched after the failed migration.
        self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
        self.assertEqual(power_state.RUNNING, instance_ref.power_state)

    @mock.patch('shutil.rmtree')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
    def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                        mock_get_instance_path,
                                                        mock_exist,
                                                        mock_shutil
                                                        ):
        # With a non-shared instance path the rollback must remove the
        # destination instance directory even when destroy() raises.
        # destroy method may raise InstanceTerminationFailure or
        # InstancePowerOffFailure, here use their base class Invalid.
        mock_destroy.side_effect = exception.Invalid(reason='just test')
        fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                          '/fake_instance_uuid')
        mock_get_instance_path.return_value = fake_instance_path
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_instance_path=False,
            instance_relative_path=False)
        self.assertRaises(exception.Invalid,
                          drvr.rollback_live_migration_at_destination,
                          "context", "instance", [], None, True,
                          migrate_data)
        mock_exist.assert_called_once_with(fake_instance_path)
        mock_shutil.assert_called_once_with(fake_instance_path)

    @mock.patch('shutil.rmtree')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
    def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
                                                    mock_get_instance_path,
                                                    mock_exist,
                                                    mock_shutil
                                                    ):
        # Shared instance path: rollback must only destroy the guest and
        # must NOT touch the (shared) instance directory.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_instance_path=True,
            instance_relative_path=False)
        drvr.rollback_live_migration_at_destination("context", "instance",
                                                    [], None, True,
                                                    migrate_data)
mock_destroy.assert_called_once_with("context", "instance", [],
                                             None, True, migrate_data)
        self.assertFalse(mock_get_instance_path.called)
        self.assertFalse(mock_exist.called)
        self.assertFalse(mock_shutil.called)

    @mock.patch.object(host.Host, "get_connection")
    @mock.patch.object(host.Host, "has_min_version", return_value=False)
    @mock.patch.object(fakelibvirt.Domain, "XMLDesc")
    def test_live_migration_copy_disk_paths(self, mock_xml, mock_version,
                                            mock_conn):
        # Only local, writable, non-shared file/block disks should be
        # selected for copy: vda (file) and vdd (block); shared, readonly
        # and network disks are skipped.
        xml = """
        <domain>
          <name>dummy</name>
          <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
          <devices>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.root"/>
               <target dev="vda"/>
            </disk>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.shared"/>
               <target dev="vdb"/>
               <shareable/>
            </disk>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.config"/>
               <target dev="vdc"/>
               <readonly/>
            </disk>
            <disk type="block">
               <source dev="/dev/mapper/somevol"/>
               <target dev="vdd"/>
            </disk>
            <disk type="network">
               <source protocol="https" name="url_path">
                 <host name="hostname" port="443"/>
               </source>
            </disk>
          </devices>
        </domain>"""
        mock_xml.return_value = xml

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
        guest = libvirt_guest.Guest(dom)

        paths = drvr._live_migration_copy_disk_paths(None, None, guest)
        self.assertEqual((["/var/lib/nova/instance/123/disk.root",
                           "/dev/mapper/somevol"], ['vda', 'vdd']), paths)

    @mock.patch.object(fakelibvirt.Domain, "XMLDesc")
    def test_live_migration_copy_disk_paths_tunnelled(self, mock_xml):
        # Same selection logic must hold when tunnelling is enabled.
        self.flags(live_migration_tunnelled=True, group='libvirt')
        xml = """
        <domain>
          <name>dummy</name>
          <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
          <devices>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.root"/>
               <target dev="vda"/>
            </disk>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.shared"/>
               <target dev="vdb"/>
               <shareable/>
            </disk>
            <disk type="file">
               <source
file="/var/lib/nova/instance/123/disk.config"/>
               <target dev="vdc"/>
               <readonly/>
            </disk>
            <disk type="block">
               <source dev="/dev/mapper/somevol"/>
               <target dev="vdd"/>
            </disk>
            <disk type="network">
               <source protocol="https" name="url_path">
                 <host name="hostname" port="443"/>
               </source>
            </disk>
          </devices>
        </domain>"""
        mock_xml.return_value = xml

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._parse_migration_flags()
        dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
        guest = libvirt_guest.Guest(dom)

        paths = drvr._live_migration_copy_disk_paths(None, None, guest)
        self.assertEqual((["/var/lib/nova/instance/123/disk.root",
                           "/dev/mapper/somevol"], ['vda', 'vdd']), paths)

    @mock.patch.object(host.Host, "get_connection")
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    @mock.patch('nova.virt.driver.get_block_device_info')
    @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
    @mock.patch.object(fakelibvirt.Domain, "XMLDesc")
    def test_live_migration_copy_disk_paths_selective_block_migration(
            self, mock_xml, mock_get_instance,
            mock_block_device_info, mock_version, mock_conn):
        # Attached volumes (vdd here) must be excluded from the copy list;
        # only locally-backed disks vda/vdb/vdc are copied.
        xml = """
        <domain>
          <name>dummy</name>
          <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
          <devices>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.root"/>
               <target dev="vda"/>
            </disk>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.shared"/>
               <target dev="vdb"/>
            </disk>
            <disk type="file">
               <source file="/var/lib/nova/instance/123/disk.config"/>
               <target dev="vdc"/>
            </disk>
            <disk type="block">
               <source dev="/dev/mapper/somevol"/>
               <target dev="vdd"/>
            </disk>
            <disk type="network">
               <source protocol="https" name="url_path">
                 <host name="hostname" port="443"/>
               </source>
            </disk>
          </devices>
        </domain>"""
        mock_xml.return_value = xml
        instance = objects.Instance(**self.test_instance)
        instance.root_device_name = '/dev/vda'
        block_device_info = {
            'swap': {
                'disk_bus': u'virtio',
                'swap_size': 10,
                'device_name': u'/dev/vdc'
            },
'root_device_name': u'/dev/vda',
            'ephemerals': [{
                'guest_format': u'ext3',
                'device_name': u'/dev/vdb',
                'disk_bus': u'virtio',
                'device_type': u'disk',
                'size': 1
            }],
            # vdd is a cinder volume attachment, so it must not be copied.
            'block_device_mapping': [{
                'guest_format': None,
                'boot_index': None,
                'mount_device': u'/dev/vdd',
                'connection_info': {
                    u'driver_volume_type': u'iscsi',
                    'serial': u'147df29f-aec2-4851-b3fe-f68dad151834',
                    u'data': {
                        u'access_mode': u'rw',
                        u'target_discovered': False,
                        u'encrypted': False,
                        u'qos_specs': None,
                        u'target_iqn': u'iqn.2010-10.org.openstack:'
                                       u'volume-147df29f-aec2-4851-b3fe-'
                                       u'f68dad151834',
                        u'target_portal': u'10.102.44.141:3260',
                        u'volume_id':
                            u'147df29f-aec2-4851-b3fe-f68dad151834',
                        u'target_lun': 1,
                        u'auth_password': u'cXELT66FngwzTwpf',
                        u'auth_username': u'QbQQjj445uWgeQkFKcVw',
                        u'auth_method': u'CHAP'
                    }
                },
                'disk_bus': None,
                'device_type': None,
                'delete_on_termination': False
            }]
        }
        mock_block_device_info.return_value = block_device_info

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
        guest = libvirt_guest.Guest(dom)
        return_value = drvr._live_migration_copy_disk_paths(self.context,
                                                            instance,
                                                            guest)
        expected = (['/var/lib/nova/instance/123/disk.root',
                     '/var/lib/nova/instance/123/disk.shared',
                     '/var/lib/nova/instance/123/disk.config'],
                    ['vda', 'vdb', 'vdc'])
        self.assertEqual(expected, return_value)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_live_migration_copy_disk_paths")
    def test_live_migration_data_gb_plain(self, mock_paths):
        # No disks to copy: data size is just the instance RAM (2 GB) and
        # the disk-path helper must not be consulted.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        data_gb = drvr._live_migration_data_gb(instance, [])
        self.assertEqual(2, data_gb)
        self.assertEqual(0, mock_paths.call_count)

    def test_live_migration_data_gb_block(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        def fake_stat(path):
            # Minimal stand-in for os.stat results; only st_size is read.
            class StatResult(object):
                def __init__(self, size):
                    self._size = size
@property
                def st_size(self):
                    return self._size

            if path == "/var/lib/nova/instance/123/disk.root":
                return StatResult(10 * units.Gi)
            elif path == "/dev/mapper/somevol":
                return StatResult(1.5 * units.Gi)
            else:
                raise Exception("Should not be reached")

        disk_paths = ["/var/lib/nova/instance/123/disk.root",
                      "/dev/mapper/somevol"]
        with mock.patch.object(os, "stat") as mock_stat:
            mock_stat.side_effect = fake_stat
            data_gb = drvr._live_migration_data_gb(instance, disk_paths)
            # Expecting 2 GB for RAM, plus 10 GB for disk.root
            # and 1.5 GB rounded to 2 GB for somevol, so 14 GB
            self.assertEqual(14, data_gb)

    # Expected outcomes for _test_live_migration_monitoring.
    EXPECT_SUCCESS = 1
    EXPECT_FAILURE = 2
    EXPECT_ABORT = 3

    @mock.patch.object(libvirt_guest.Guest, "migrate_start_postcopy")
    @mock.patch.object(time, "time")
    @mock.patch.object(time, "sleep",
                       side_effect=lambda x: eventlet.sleep(0))
    @mock.patch.object(host.Host, "get_connection")
    @mock.patch.object(libvirt_guest.Guest, "get_job_info")
    @mock.patch.object(objects.Instance, "save")
    @mock.patch.object(objects.Migration, "save")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    @mock.patch.object(fakelibvirt.virDomain, "abortJob")
    @mock.patch.object(libvirt_guest.Guest, "pause")
    def _test_live_migration_monitoring(self,
                                        job_info_records,
                                        time_records,
                                        expect_result,
                                        mock_pause,
                                        mock_abort,
                                        mock_running,
                                        mock_save,
                                        mock_mig_save,
                                        mock_job_info,
                                        mock_conn,
                                        mock_sleep,
                                        mock_time,
                                        mock_postcopy_switch,
                                        current_mig_status=None,
                                        expected_mig_status=None,
                                        scheduled_action=None,
                                        scheduled_action_executed=False,
                                        block_migration=False,
                                        expected_switch=False):
        # Shared driver for the monitor tests: replays job_info_records
        # (JobInfo objects interleaved with control strings) through
        # _live_migration_monitor and verifies post/recover/abort/pause/
        # postcopy-switch behaviour against expect_result.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        drvr.active_migrations[instance.uuid] = deque()
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
        guest = libvirt_guest.Guest(dom)
        finish_event = eventlet.event.Event()

        def fake_job_info():
            while True:
                self.assertGreater(len(job_info_records), 0)
                rec = job_info_records.pop(0)

                if type(rec) == str:
                    # Control strings drive side effects rather than
                    # returning a job-info record.
                    if rec == "thread-finish":
                        finish_event.send()
                    elif rec == "domain-stop":
                        dom.destroy()
                    elif rec == "force_complete":
                        drvr.active_migrations[instance.uuid].append(
                            "force-complete")
                else:
                    if len(time_records) > 0:
                        time_records.pop(0)
                    return rec
            return rec

        def fake_time():
            if len(time_records) > 0:
                return time_records[0]
            else:
                # Arbitrary fixed time once the scripted times run out.
                return int(
                    datetime.datetime(2001, 1, 20, 20, 1, 0)
                    .strftime('%s'))

        mock_job_info.side_effect = fake_job_info
        mock_time.side_effect = fake_time

        dest = mock.sentinel.migrate_dest
        migration = objects.Migration(context=self.context, id=1)
        migrate_data = objects.LibvirtLiveMigrateData(
            migration=migration, block_migration=block_migration)

        if current_mig_status:
            migrate_data.migration.status = current_mig_status
        else:
            migrate_data.migration.status = "unset"
        migrate_data.migration.save()

        fake_post_method = mock.MagicMock()
        fake_recover_method = mock.MagicMock()
        drvr._live_migration_monitor(self.context, instance,
                                     guest, dest,
                                     fake_post_method,
                                     fake_recover_method,
                                     False,
                                     migrate_data,
                                     finish_event,
                                     [])
        if scheduled_action_executed:
            if scheduled_action == 'pause':
                self.assertTrue(mock_pause.called)
            if scheduled_action == 'postcopy_switch':
                self.assertTrue(mock_postcopy_switch.called)
        else:
            if scheduled_action == 'pause':
                self.assertFalse(mock_pause.called)
            if scheduled_action == 'postcopy_switch':
                self.assertFalse(mock_postcopy_switch.called)
        mock_mig_save.assert_called_with()

        if expect_result == self.EXPECT_SUCCESS:
            self.assertFalse(fake_recover_method.called,
                             'Recover method called when success expected')
            self.assertFalse(mock_abort.called,
                             'abortJob not called when success expected')
            if expected_switch:
                self.assertTrue(mock_postcopy_switch.called)
            fake_post_method.assert_called_once_with(
                self.context, instance, dest, False, migrate_data)
        else:
            if expect_result == self.EXPECT_ABORT:
                self.assertTrue(mock_abort.called,
                                'abortJob called when abort expected')
            else:
                self.assertFalse(mock_abort.called,
                                 'abortJob not called when failure expected')
            self.assertFalse(fake_post_method.called,
                             'Post method called when success not expected')
            if expected_mig_status:
                fake_recover_method.assert_called_once_with(
                    self.context, instance, dest, False, migrate_data,
                    migration_status=expected_mig_status)
            else:
                fake_recover_method.assert_called_once_with(
                    self.context, instance, dest, False, migrate_data)
        # The monitor must always clean up its active_migrations entry.
        self.assertNotIn(instance.uuid, drvr.active_migrations)

    def test_live_migration_monitor_success(self):
        # A normal sequence where see all the normal job states
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS)

    def test_live_migration_handle_pause_normal(self):
        # A normal sequence where see all the normal job states, and pause
        # scheduled in between VIR_DOMAIN_JOB_UNBOUNDED
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS,
                                             current_mig_status="running",
                                             scheduled_action="pause",
                                             scheduled_action_executed=True)

    def test_live_migration_handle_pause_on_start(self):
        # A normal sequence where see all the normal job states, and pause
        # scheduled in case of job type VIR_DOMAIN_JOB_NONE and finish_event
        # is not ready yet
        domain_info_records = [
            "force_complete",
            libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS,
                                             current_mig_status="preparing",
                                             scheduled_action="pause",
                                             scheduled_action_executed=True)

    def test_live_migration_handle_pause_on_finish(self):
        # A normal sequence where see all the normal job states, and pause
        # scheduled in case of job type VIR_DOMAIN_JOB_NONE and finish_event
        # is ready
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS,
                                             current_mig_status="completed",
                                             scheduled_action="pause",
                                             scheduled_action_executed=False)

    def test_live_migration_handle_pause_on_cancel(self):
        # A normal sequence where see all the normal job states, and pause
        # scheduled in case of job type VIR_DOMAIN_JOB_CANCELLED
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE,
                                             current_mig_status="cancelled",
                                             expected_mig_status='cancelled',
                                             scheduled_action="pause",
                                             scheduled_action_executed=False)

    def test_live_migration_handle_pause_on_failure(self):
        # A normal sequence where see all the normal job states, and pause
        # scheduled in case of job type VIR_DOMAIN_JOB_FAILED
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE,
                                             scheduled_action="pause",
                                             scheduled_action_executed=False)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_normal(self,
            mock_postcopy_enabled):
        # A normal sequence where see all the normal job states, and postcopy
        # switch scheduled in between VIR_DOMAIN_JOB_UNBOUNDED
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(
            domain_info_records, [],
            self.EXPECT_SUCCESS,
            current_mig_status="running",
            scheduled_action="postcopy_switch",
            scheduled_action_executed=True)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_on_start(self,
            mock_postcopy_enabled):
        # A normal sequence where see all the normal job states, and postcopy
        # switch scheduled in case of job type VIR_DOMAIN_JOB_NONE
and
        # finish_event is not ready yet
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(
            domain_info_records, [],
            self.EXPECT_SUCCESS,
            current_mig_status="preparing",
            scheduled_action="postcopy_switch",
            scheduled_action_executed=True)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_on_finish(self,
            mock_postcopy_enabled):
        # A normal sequence where see all the normal job states, and postcopy
        # switch scheduled in case of job type VIR_DOMAIN_JOB_NONE and
        # finish_event is ready
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(
            domain_info_records, [],
            self.EXPECT_SUCCESS,
            current_mig_status="completed",
            scheduled_action="postcopy_switch",
            scheduled_action_executed=False)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_on_cancel(self,
            mock_postcopy_enabled):
        # A normal sequence where see all the normal job states, and postcopy
        # scheduled in case of job type VIR_DOMAIN_JOB_CANCELLED
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
        ]

        self._test_live_migration_monitoring(
            domain_info_records, [],
            self.EXPECT_FAILURE,
            current_mig_status="cancelled",
            expected_mig_status='cancelled',
            scheduled_action="postcopy_switch",
            scheduled_action_executed=False)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_pause_on_postcopy(self,
            mock_postcopy_enabled):
        # A normal sequence where see all the normal job states, and pause
        # scheduled after migration switched to postcopy
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(
            domain_info_records, [],
            self.EXPECT_SUCCESS,
            current_mig_status="running (post-copy)",
            scheduled_action="pause",
            scheduled_action_executed=False)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_on_postcopy(self,
            mock_postcopy_enabled):
        # A normal sequence where see all the normal job states, and pause
        # scheduled after migration switched to postcopy
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(
            domain_info_records, [],
            self.EXPECT_SUCCESS,
            current_mig_status="running (post-copy)",
            scheduled_action="postcopy_switch",
            scheduled_action_executed=False)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_on_failure(self,
            mock_postcopy_enabled):
        # A normal sequence where see all the normal job states, and postcopy
        # scheduled in case of job type VIR_DOMAIN_JOB_FAILED
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
        ]

        self._test_live_migration_monitoring(
            domain_info_records, [],
            self.EXPECT_FAILURE,
            scheduled_action="postcopy_switch",
            scheduled_action_executed=False)

    def test_live_migration_monitor_success_race(self):
        # A normalish sequence but we're too slow to see the
        # completed job state
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS)

    def test_live_migration_monitor_failed(self):
        # A failed sequence where we see all the expected events
domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE)

    def test_live_migration_monitor_failed_race(self):
        # A failed sequence where we are too slow to see the
        # failed event
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE)

    def test_live_migration_monitor_cancelled(self):
        # A cancelled sequence where we see all the events
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE,
                                             expected_mig_status='cancelled')

    @mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_migration_downtime_steps")
    def test_live_migration_monitor_downtime(self, mock_downtime_steps,
                                             mock_set_downtime):
        self.flags(live_migration_completion_timeout=1000000,
                   live_migration_progress_timeout=1000000,
                   group='libvirt')
        # We've setup 4 fake downtime steps - first value is the
        # time delay, second is the downtime value
        downtime_steps = [
            (90, 10),
            (180, 50),
            (270, 200),
            (500, 300),
        ]
        mock_downtime_steps.return_value = downtime_steps

        # Each one of these fake times is used for time.time()
        # when a new domain_info_records entry is consumed.
        # Times are chosen so that only the first 3 downtime
        # steps are needed.
        fake_times = [0, 1, 30, 95, 150, 200, 300]

        # A normal sequence where see all the normal job states
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records,
                                             fake_times, self.EXPECT_SUCCESS)

        # Only the first three downtime steps fall within the fake times.
        mock_set_downtime.assert_has_calls([mock.call(10),
                                            mock.call(50),
                                            mock.call(200)])

    def test_live_migration_monitor_completion(self):
        self.flags(live_migration_completion_timeout=100,
                   live_migration_progress_timeout=1000000,
                   group='libvirt')
        # Each one of these fake times is used for time.time()
        # when a new domain_info_records entry is consumed.
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_monitor_progress(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_monitor_progress_zero_data_remaining(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=0), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=70), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=50), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=30), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=10), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=0), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_FAILED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_FAILURE) def test_live_migration_downtime_steps(self): self.flags(live_migration_downtime=400, group='libvirt') self.flags(live_migration_downtime_steps=10, group='libvirt') self.flags(live_migration_downtime_delay=30, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) steps = drvr._migration_downtime_steps(3.0) self.assertEqual([ (0, 37), (90, 38), (180, 39), (270, 42), (360, 46), (450, 55), (540, 70), (630, 98), (720, 148), (810, 238), (900, 400), ], list(steps)) @mock.patch('nova.virt.libvirt.migration.should_switch_to_postcopy') @mock.patch.object(libvirt_driver.LibvirtDriver, "_is_post_copy_enabled") def test_live_migration_monitor_postcopy_switch(self, mock_postcopy_enabled, mock_should_switch): # A normal sequence where migration is switched to postcopy mode mock_postcopy_enabled.return_value = True switch_values = [False, False, True] mock_should_switch.return_value = switch_values domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), 
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        # expected_switch=True: the monitor must have requested postcopy.
        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS,
                                             expected_switch=True)

    @mock.patch.object(host.Host, "get_connection")
    @mock.patch.object(utils, "spawn")
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_live_migration_copy_disk_paths")
    def test_live_migration_main(self, mock_copy_disk_path, mock_running,
                                 mock_guest, mock_monitor, mock_thread,
                                 mock_conn):
        """_live_migration() wires the operation thread and monitor together.

        Verifies the disk paths/devices from _live_migration_copy_disk_paths
        are forwarded to the spawned operation thread (device names) and to
        the monitor (file paths), along with a fresh eventlet Event.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        dom = fakelibvirt.Domain(drvr._get_connection(),
                                 "<domain><name>demo</name></domain>", True)
        guest = libvirt_guest.Guest(dom)
        migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
        disks_to_copy = (['/some/path/one', '/test/path/two'],
                         ['vda', 'vdb'])
        mock_copy_disk_path.return_value = disks_to_copy

        mock_guest.return_value = guest

        def fake_post():
            pass

        def fake_recover():
            pass

        drvr._live_migration(self.context, instance, "fakehost",
                             fake_post, fake_recover, True,
                             migrate_data)
        mock_copy_disk_path.assert_called_once_with(self.context, instance,
                                                    guest)

        # Matches any eventlet Event, since _live_migration creates its own.
        class AnyEventletEvent(object):
            def __eq__(self, other):
                return type(other) == eventlet.event.Event

        mock_thread.assert_called_once_with(
            drvr._live_migration_operation,
            self.context, instance, "fakehost", True,
            migrate_data, guest, disks_to_copy[1])
        mock_monitor.assert_called_once_with(
            self.context, instance, guest, "fakehost",
            fake_post, fake_recover, True,
            migrate_data, AnyEventletEvent(), disks_to_copy[0])

    def _do_test_create_images_and_backing(self, disk_type):
        """Shared driver for the qcow2/raw create_images_and_backing tests.

        Asserts _create_images_and_backing creates the image with the given
        disk type and fetches kernel/ramdisk when the path does not exist.
        """
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

        disk_info = {'path': 'foo', 'type': disk_type,
                     'disk_size': 1 * 1024 ** 3,
                     'virt_disk_size': 20 * 1024 ** 3,
                     'backing_file': None}

        libvirt_driver.libvirt_utils.create_image(
            disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])

        drvr._fetch_instance_kernel_ramdisk(self.context, instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()

        # Force the "image missing on disk" path.
        self.stub_out('os.path.exists', lambda *args: False)
        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", [disk_info])

    def test_create_images_and_backing_qcow2(self):
        self._do_test_create_images_and_backing('qcow2')

    def test_create_images_and_backing_raw(self):
        self._do_test_create_images_and_backing('raw')

    def test_create_images_and_backing_images_not_exist_no_fallback(self):
        """ImageNotFound from fetch_image propagates when no fallback host."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        backing_file = imagecache.get_cache_fname(instance.image_ref)
        disk_info = [
            {u'backing_file': backing_file,
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                               side_effect=exception.ImageNotFound(
                                   image_id="fake_id")):
            self.assertRaises(exception.ImageNotFound,
                              conn._create_images_and_backing,
                              self.context, instance,
                              "/fake/instance/dir", disk_info)

    def test_create_images_and_backing_images_not_exist_fallback(self):
        """When glance misses, images are copied from fallback_from_host."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'kernel_id': uuids.kernel_id,
                                   'ramdisk_id': uuids.ramdisk_id,
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        backing_file = imagecache.get_cache_fname(instance.image_ref)
        disk_info = [
            {u'backing_file': backing_file,
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        # fetch_image raises ImageNotFound for every image, so the driver
        # must fall back to copying backing file, kernel and ramdisk from
        # the source host instead.
        with test.nested(
            mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                              side_effect=exception.ImageNotFound(
                                  image_id=uuids.fake_id)),
        ) as (copy_image_mock, fetch_image_mock):
            conn._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir", disk_info,
                                            fallback_from_host="fake_host")
            backfile_path = os.path.join(base_dir, backing_file)
            kernel_path = os.path.join(CONF.instances_path,
                                       self.test_instance['uuid'],
                                       'kernel')
            ramdisk_path = os.path.join(CONF.instances_path,
                                        self.test_instance['uuid'],
                                        'ramdisk')
            copy_image_mock.assert_has_calls([
                mock.call(dest=backfile_path, src=backfile_path,
                          host='fake_host', receive=True),
                mock.call(dest=kernel_path, src=kernel_path,
                          host='fake_host', receive=True),
                mock.call(dest=ramdisk_path, src=ramdisk_path,
                          host='fake_host', receive=True)
            ])
            # fetch_image must still have been attempted first for each.
            fetch_image_mock.assert_has_calls([
                mock.call(context=self.context,
                          target=backfile_path,
                          image_id=self.test_instance['image_ref']),
                mock.call(self.context, kernel_path, instance.kernel_id),
                mock.call(self.context, ramdisk_path, instance.ramdisk_id)
            ])

    @mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image')
    def test_create_images_and_backing_images_exist(self, mock_fetch_image):
        """No fetch happens when the backing images already exist on disk."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'kernel_id': 'fake_kernel_id',
                                   'ramdisk_id': 'fake_ramdisk_id',
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        disk_info = [
            {u'backing_file': imagecache.get_cache_fname(instance.image_ref),
             u'disk_size':
10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] with test.nested( mock.patch.object(imagebackend.Image, 'get_disk_size'), mock.patch.object(os.path, 'exists', return_value=True) ): conn._create_images_and_backing(self.context, instance, '/fake/instance/dir', disk_info) self.assertFalse(mock_fetch_image.called) def test_create_images_and_backing_ephemeral_gets_created(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) instance = objects.Instance(**self.test_instance) disk_info_byname = fake_disk_info_byname(instance) disk_info = disk_info_byname.values() # Give the ephemeral disk a non-default name disk_info_byname['disk.local']['backing_file'] = 'ephemeral_foo' with test.nested( mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'), mock.patch.object(drvr, '_create_ephemeral'), mock.patch.object(imagebackend.Image, 'verify_base_size'), mock.patch.object(imagebackend.Image, 'get_disk_size') ) as (fetch_image_mock, create_ephemeral_mock, verify_base_size_mock, disk_size_mock): drvr._create_images_and_backing(self.context, instance, CONF.instances_path, disk_info) self.assertEqual(len(create_ephemeral_mock.call_args_list), 1) root_backing, ephemeral_backing = [ os.path.join(base_dir, name) for name in (disk_info_byname['disk']['backing_file'], 'ephemeral_foo') ] m_args, m_kwargs = create_ephemeral_mock.call_args_list[0] self.assertEqual(ephemeral_backing, m_kwargs['target']) self.assertEqual(len(fetch_image_mock.call_args_list), 1) m_args, m_kwargs = fetch_image_mock.call_args_list[0] self.assertEqual(root_backing, m_kwargs['target']) verify_base_size_mock.assert_has_calls([ mock.call(root_backing, instance.flavor.root_gb * units.Gi), mock.call(ephemeral_backing, instance.flavor.ephemeral_gb * units.Gi) ]) def test_create_images_and_backing_disk_info_none(self): instance = objects.Instance(**self.test_instance) drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture()) drvr._create_images_and_backing(self.context, instance, "/fake/instance/dir", None) # Assert that we did nothing self.assertEqual({}, fake_backend.created_disks) def _generate_target_ret(self, target_connect_addr=None): target_ret = { 'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': target_connect_addr, 'serial_listen_addr': '127.0.0.1', 'volume': { '12345': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}, 'serial': '12345'}, 'disk_info': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}}, '67890': {'connection_info': {u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}, 'serial': '67890'}, 'disk_info': {'bus': 'scsi', 'dev': 'sdb', 'type': 'disk'}}}} return target_ret def test_pre_live_migration_works_correctly_mocked(self): self._test_pre_live_migration_works_correctly_mocked() def test_pre_live_migration_with_transport_ip(self): self.flags(live_migration_inbound_addr='127.0.0.2', group='libvirt') target_ret = self._generate_target_ret('127.0.0.2') self._test_pre_live_migration_works_correctly_mocked(target_ret) def _test_pre_live_migration_works_correctly_mocked(self, target_ret=None): # Creating testdata vol = {'block_device_mapping': [ {'connection_info': {'serial': '12345', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}}, 'mount_device': '/dev/sda'}, {'connection_info': {'serial': '67890', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}, 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) class FakeNetworkInfo(object): def fixed_ips(self): return ["test_ip_addr"] def fake_none(*args, **kwargs): return self.stubs.Set(drvr, '_create_images_and_backing', fake_none) instance = 
objects.Instance(**self.test_instance) c = context.get_admin_context() nw_info = FakeNetworkInfo() # Creating mocks self.mox.StubOutWithMock(driver, "block_device_info_get_mapping") driver.block_device_info_get_mapping(vol ).AndReturn(vol['block_device_mapping']) self.mox.StubOutWithMock(drvr, "_connect_volume") for v in vol['block_device_mapping']: disk_info = { 'bus': "scsi", 'dev': v['mount_device'].rpartition("/")[2], 'type': "disk" } drvr._connect_volume(v['connection_info'], disk_info) self.mox.StubOutWithMock(drvr, 'plug_vifs') drvr.plug_vifs(mox.IsA(instance), nw_info) self.mox.ReplayAll() migrate_data = migrate_data_obj.LibvirtLiveMigrateData( block_migration=False, instance_relative_path='foo', is_shared_block_storage=False, is_shared_instance_path=False, ) result = drvr.pre_live_migration( c, instance, vol, nw_info, None, migrate_data=migrate_data) if not target_ret: target_ret = self._generate_target_ret() self.assertEqual( result.to_legacy_dict( pre_migration_result=True)['pre_live_migration_result'], target_ret) @mock.patch.object(os, 'mkdir') @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.remotefs.' 
'RemoteFilesystem.copy_file') @mock.patch('nova.virt.driver.block_device_info_get_mapping') @mock.patch('nova.virt.configdrive.required_by', return_value=True) def test_pre_live_migration_block_with_config_drive_success( self, mock_required_by, block_device_info_get_mapping, mock_copy_file, mock_get_instance_path, mock_mkdir): self.flags(config_drive_format='iso9660') vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} fake_instance_path = os.path.join(cfg.CONF.instances_path, '/fake_instance_uuid') mock_get_instance_path.return_value = fake_instance_path drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) migrate_data = objects.LibvirtLiveMigrateData() migrate_data.is_shared_instance_path = False migrate_data.is_shared_block_storage = False migrate_data.block_migration = True migrate_data.instance_relative_path = 'foo' src = "%s:%s/disk.config" % (instance.host, fake_instance_path) result = drvr.pre_live_migration( self.context, instance, vol, [], None, migrate_data) block_device_info_get_mapping.assert_called_once_with( {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'} ]} ) mock_copy_file.assert_called_once_with(src, fake_instance_path) migrate_data.graphics_listen_addrs_vnc = '127.0.0.1' migrate_data.graphics_listen_addrs_spice = '127.0.0.1' migrate_data.serial_listen_addr = '127.0.0.1' self.assertEqual(migrate_data, result) @mock.patch('nova.virt.driver.block_device_info_get_mapping', return_value=()) def test_pre_live_migration_block_with_config_drive_mocked_with_vfat( self, block_device_info_get_mapping): self.flags(config_drive_format='vfat') # Creating testdata vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) instance.config_drive = 'True' migrate_data = migrate_data_obj.LibvirtLiveMigrateData( is_shared_instance_path=False, is_shared_block_storage=False, block_migration=False, instance_relative_path='foo', ) res_data = drvr.pre_live_migration( self.context, instance, vol, [], None, migrate_data) res_data = res_data.to_legacy_dict(pre_migration_result=True) block_device_info_get_mapping.assert_called_once_with( {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sda'}, {'connection_info': 'dummy', 'mount_device': '/dev/sdb'} ]} ) self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'}, 'target_connect_addr': None, 'serial_listen_addr': '127.0.0.1', 'volume': {}}, res_data['pre_live_migration_result']) def test_pre_live_migration_vol_backed_works_correctly_mocked(self): # Creating testdata, using temp dir. with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) vol = {'block_device_mapping': [ {'connection_info': {'serial': '12345', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}}, 'mount_device': '/dev/sda'}, {'connection_info': {'serial': '67890', u'data': {'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}, 'mount_device': '/dev/sdb'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) def fake_none(*args, **kwargs): return self.stubs.Set(drvr, '_create_images_and_backing', fake_none) class FakeNetworkInfo(object): def fixed_ips(self): return ["test_ip_addr"] inst_ref = objects.Instance(**self.test_instance) c = context.get_admin_context() nw_info = FakeNetworkInfo() # Creating mocks self.mox.StubOutWithMock(drvr, "_connect_volume") for v in vol['block_device_mapping']: disk_info = { 'bus': "scsi", 'dev': v['mount_device'].rpartition("/")[2], 'type': "disk" } drvr._connect_volume(v['connection_info'], disk_info) 
            self.mox.StubOutWithMock(drvr, 'plug_vifs')
            drvr.plug_vifs(mox.IsA(inst_ref), nw_info)

            self.mox.ReplayAll()
            migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
                is_shared_instance_path=False,
                is_shared_block_storage=False,
                is_volume_backed=True,
                block_migration=False,
                instance_relative_path=inst_ref['name'],
                disk_over_commit=False,
                disk_available_mb=123,
                image_type='qcow2',
                filename='foo',
            )
            ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                          migrate_data)
            # Expected legacy-dict result: graphics/serial addresses plus a
            # per-volume connection_info/disk_info map keyed by serial.
            target_ret = {
                'graphics_listen_addrs': {'spice': '127.0.0.1',
                                          'vnc': '127.0.0.1'},
                'target_connect_addr': None,
                'serial_listen_addr': '127.0.0.1',
                'volume': {
                    '12345': {'connection_info': {u'data': {'device_path':
                              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                              'serial': '12345'},
                              'disk_info': {'bus': 'scsi',
                                            'dev': 'sda',
                                            'type': 'disk'}},
                    '67890': {'connection_info': {u'data': {'device_path':
                              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                              'serial': '67890'},
                              'disk_info': {'bus': 'scsi',
                                            'dev': 'sdb',
                                            'type': 'disk'}}}}
            self.assertEqual(
                ret.to_legacy_dict(True)['pre_live_migration_result'],
                target_ret)
            # The instance directory must have been created under tmpdir.
            self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                       inst_ref['name'])))

    def test_pre_live_migration_plug_vifs_retry_fails(self):
        """pre_live_migration fails once plug_vifs retries are exhausted."""
        self.flags(live_migration_retry_count=3)
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            # Always fails, so every retry attempt fails too.
            raise processutils.ProcessExecutionError()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        # Avoid real sleeps between retries.
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=True,
            is_shared_instance_path=True,
            block_migration=False,
        )
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json,
                          migrate_data=migrate_data)

    def test_pre_live_migration_plug_vifs_retry_works(self):
        """pre_live_migration succeeds if plug_vifs recovers within retries."""
        self.flags(live_migration_retry_count=3)
        called = {'count': 0}
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            # Fail until the final allowed attempt, then succeed.
            called['count'] += 1
            if called['count'] < CONF.live_migration_retry_count:
                raise processutils.ProcessExecutionError()
            else:
                return

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=True,
            is_shared_instance_path=True,
            block_migration=False,
        )
        drvr.pre_live_migration(self.context, instance,
                                block_device_info=None,
                                network_info=[], disk_info=disk_info_json,
                                migrate_data=migrate_data)

    def test_pre_live_migration_image_not_created_with_shared_storage(self):
        """No disks are created when storage/instance path is shared.

        Runs three migrate_data variants (shared instance path with and
        without shared block storage, plus block_migration) and asserts
        _create_images_and_backing is never invoked.
        """
        migrate_data_set = [{'is_shared_block_storage': False,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': False},
                            {'is_shared_block_storage': True,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': False},
                            {'is_shared_block_storage': False,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': True}]

        def _to_obj(d):
            return migrate_data_obj.LibvirtLiveMigrateData(**d)
        migrate_data_set = map(_to_obj, migrate_data_set)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # creating mocks
        with test.nested(
            mock.patch.object(drvr,
'_create_images_and_backing'), mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'), mock.patch.object(drvr, 'plug_vifs'), ) as ( create_image_mock, rules_mock, plug_mock, ): disk_info_json = jsonutils.dumps({}) for migrate_data in migrate_data_set: res = drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json, migrate_data=migrate_data) self.assertFalse(create_image_mock.called) self.assertIsInstance(res, objects.LibvirtLiveMigrateData) def test_pre_live_migration_with_not_shared_instance_path(self): migrate_data = migrate_data_obj.LibvirtLiveMigrateData( is_shared_block_storage=False, is_shared_instance_path=False, block_migration=False, instance_relative_path='foo', ) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) def check_instance_dir(context, instance, instance_dir, disk_info, fallback_from_host=False): self.assertTrue(instance_dir) # creating mocks with test.nested( mock.patch.object(drvr, '_create_images_and_backing', side_effect=check_instance_dir), mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'), mock.patch.object(drvr, 'plug_vifs'), ) as ( create_image_mock, rules_mock, plug_mock, ): disk_info_json = jsonutils.dumps({}) res = drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=disk_info_json, migrate_data=migrate_data) create_image_mock.assert_has_calls( [mock.call(self.context, instance, mock.ANY, {}, fallback_from_host=instance.host)]) self.assertIsInstance(res, objects.LibvirtLiveMigrateData) def test_pre_live_migration_recreate_disk_info(self): migrate_data = migrate_data_obj.LibvirtLiveMigrateData( is_shared_block_storage=False, is_shared_instance_path=False, block_migration=True, instance_relative_path='/some/path/', ) disk_info = [{'disk_size': 5368709120, 'type': 'raw', 'virt_disk_size': 5368709120, 'path': '/some/path/disk', 'backing_file': '', 
'over_committed_disk_size': 0}, {'disk_size': 1073741824, 'type': 'raw', 'virt_disk_size': 1073741824, 'path': '/some/path/disk.eph0', 'backing_file': '', 'over_committed_disk_size': 0}] image_disk_info = {'/some/path/disk': 'raw', '/some/path/disk.eph0': 'raw'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) instance_path = os.path.dirname(disk_info[0]['path']) disk_info_path = os.path.join(instance_path, 'disk.info') with test.nested( mock.patch.object(os, 'mkdir'), mock.patch.object(fake_libvirt_utils, 'write_to_file'), mock.patch.object(drvr, '_create_images_and_backing') ) as ( mkdir, write_to_file, create_images_and_backing ): drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=jsonutils.dumps(disk_info), migrate_data=migrate_data) write_to_file.assert_called_with(disk_info_path, jsonutils.dumps(image_disk_info)) def test_pre_live_migration_with_perf_events(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._supported_perf_events = ['cmt'] migrate_data = migrate_data_obj.LibvirtLiveMigrateData( is_shared_block_storage=False, is_shared_instance_path=False, block_migration=False, instance_relative_path='foo', ) instance = objects.Instance(**self.test_instance) res = drvr.pre_live_migration(self.context, instance, block_device_info=None, network_info=[], disk_info=None, migrate_data=migrate_data) self.assertEqual(['cmt'], res.supported_perf_events) def test_get_instance_disk_info_works_correctly(self): # Test data instance = objects.Instance(**self.test_instance) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/test/disk.local'/>" "<target dev='vdb' bus='virtio'/></disk>" "</devices></domain>") # Preparing 
mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file' self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) os.path.getsize('/test/disk.local').AndReturn((3328599655)) ret = ("image: /test/disk\n" "file format: raw\n" "virtual size: 20G (21474836480 bytes)\n" "disk size: 3.1G\n" "cluster_size: 2097152\n" "backing file: /test/dummy (actual path: /backing/file)\n") self.mox.StubOutWithMock(os.path, "exists") os.path.exists('/test/disk.local').AndReturn(True) self.mox.StubOutWithMock(utils, "execute") utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS, ).AndReturn((ret, '')) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance) info = jsonutils.loads(info) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) self.assertEqual(info[1]['type'], 'qcow2') self.assertEqual(info[1]['path'], '/test/disk.local') self.assertEqual(info[1]['virt_disk_size'], 21474836480) self.assertEqual(info[1]['backing_file'], "file") self.assertEqual(info[1]['over_committed_disk_size'], 18146236825) def test_post_live_migration(self): vol = {'block_device_mapping': [ {'connection_info': { 'data': {'multipath_id': 'dummy1'}, 'serial': 'fake_serial1'}, 'mount_device': '/dev/sda', }, {'connection_info': { 'data': 
{}, 'serial': 'fake_serial2'}, 'mount_device': '/dev/sdb', }]} def fake_initialize_connection(context, volume_id, connector): return {'data': {}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_connector = {'host': 'fake'} inst_ref = {'id': 'foo'} cntx = context.get_admin_context() # Set up the mock expectations with test.nested( mock.patch.object(driver, 'block_device_info_get_mapping', return_value=vol['block_device_mapping']), mock.patch.object(drvr, "get_volume_connector", return_value=fake_connector), mock.patch.object(drvr._volume_api, "initialize_connection", side_effect=fake_initialize_connection), mock.patch.object(drvr, '_disconnect_volume') ) as (block_device_info_get_mapping, get_volume_connector, initialize_connection, _disconnect_volume): drvr.post_live_migration(cntx, inst_ref, vol) block_device_info_get_mapping.assert_has_calls([ mock.call(vol)]) get_volume_connector.assert_has_calls([ mock.call(inst_ref)]) _disconnect_volume.assert_has_calls([ mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'), mock.call({'data': {}}, 'sdb')]) def test_get_instance_disk_info_excludes_volumes(self): # Test data instance = objects.Instance(**self.test_instance) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/test/disk.local'/>" "<target dev='vdb' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/fake/path/to/volume1'/>" "<target dev='vdc' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/fake/path/to/volume2'/>" "<target dev='vdd' bus='virtio'/></disk>" "</devices></domain>") # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def 
fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file' self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) os.path.getsize('/test/disk.local').AndReturn((3328599655)) ret = ("image: /test/disk\n" "file format: raw\n" "virtual size: 20G (21474836480 bytes)\n" "disk size: 3.1G\n" "cluster_size: 2097152\n" "backing file: /test/dummy (actual path: /backing/file)\n") self.mox.StubOutWithMock(os.path, "exists") os.path.exists('/test/disk.local').AndReturn(True) self.mox.StubOutWithMock(utils, "execute") utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS, ).AndReturn((ret, '')) self.mox.ReplayAll() conn_info = {'driver_volume_type': 'fake'} info = {'block_device_mapping': [ {'connection_info': conn_info, 'mount_device': '/dev/vdc'}, {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance, block_device_info=info) info = jsonutils.loads(info) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) self.assertEqual(info[1]['type'], 'qcow2') self.assertEqual(info[1]['path'], '/test/disk.local') self.assertEqual(info[1]['virt_disk_size'], 21474836480) self.assertEqual(info[1]['backing_file'], "file") self.assertEqual(info[1]['over_committed_disk_size'], 18146236825) def test_get_instance_disk_info_no_bdinfo_passed(self): # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method # without access 
to Nova's block device information. We want to make # sure that we guess volumes mostly correctly in that case as well instance = objects.Instance(**self.test_instance) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='block'><driver name='qemu' type='raw'/>" "<source file='/fake/path/to/volume1'/>" "<target dev='vdb' bus='virtio'/></disk>" "</devices></domain>") # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance.name: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi self.mox.StubOutWithMock(os.path, "getsize") os.path.getsize('/test/disk').AndReturn((10737418240)) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_instance_disk_info(instance) info = jsonutils.loads(info) self.assertEqual(1, len(info)) self.assertEqual(info[0]['type'], 'raw') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 10737418240) self.assertEqual(info[0]['backing_file'], "") self.assertEqual(info[0]['over_committed_disk_size'], 0) def test_spawn_with_network_info(self): # Preparing mocks def fake_none(*args, **kwargs): return def fake_getLibVersion(): return fakelibvirt.FAKE_LIBVIRT_VERSION def fake_getCapabilities(): return """ <capabilities> <host> <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid> <cpu> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <topology sockets='1' cores='2' threads='1'/> <feature name='xtpr'/> </cpu> </host> </capabilities> """ def fake_baselineCPU(cpu, flag): return """<cpu mode='custom' match='exact'> <model fallback='allow'>Penryn</model> 
<vendor>Intel</vendor> <feature policy='require' name='xtpr'/> </cpu> """ # _fake_network_info must be called before create_fake_libvirt_mock(), # as _fake_network_info calls importutils.import_class() and # create_fake_libvirt_mock() mocks importutils.import_class(). network_info = _fake_network_info(self, 1) self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion, getCapabilities=fake_getCapabilities, getVersion=lambda: 1005001, baselineCPU=fake_baselineCPU) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 # we send an int to test sha1 call instance = objects.Instance(**instance_ref) instance.config_drive = '' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_build_device_metadata') libvirt_driver.LibvirtDriver._build_device_metadata(self.context, instance) # Mock out the get_info method of the LibvirtDriver so that the polling # in the spawn method of the LibvirtDriver returns immediately self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info') libvirt_driver.LibvirtDriver.get_info(instance ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING)) # Start test self.mox.ReplayAll() with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt: del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr.firewall_driver, 'setup_basic_filtering', fake_none) self.stubs.Set(drvr.firewall_driver, 'prepare_instance_filter', fake_none) self.stubs.Set(imagebackend.Image, 'cache', fake_none) drvr.spawn(self.context, instance, image_meta, [], 'herp', network_info=network_info) path = os.path.join(CONF.instances_path, instance['name']) if os.path.isdir(path): shutil.rmtree(path) path = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if os.path.isdir(path): shutil.rmtree(os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name)) # Methods called directly 
by spawn() @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml') @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain_and_network') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') # Methods called by _create_configdrive via post_xml_callback @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata') @mock.patch.object(instance_metadata, 'InstanceMetadata') def test_spawn_with_config_drive(self, mock_instance_metadata, mock_build_device_metadata, mock_get_info, mock_create_domain_and_network, mock_get_guest_xml): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) instance.config_drive = 'True' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) instance_info = hardware.InstanceInfo(state=power_state.RUNNING) mock_build_device_metadata.return_value = None def fake_create_domain_and_network( context, xml, instance, network_info, disk_info, block_device_info=None, power_on=True, reboot=False, vifs_already_plugged=False, post_xml_callback=None): # The config disk should be created by this callback, so we need # to execute it. 
            post_xml_callback()

        fake_backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(exists=lambda _: False))

        mock_get_info.return_value = instance_info
        mock_create_domain_and_network.side_effect = \
            fake_create_domain_and_network

        drvr.spawn(self.context, instance, image_meta, [], None)

        # We should have imported 'disk.config'
        config_disk = fake_backend.disks['disk.config']
        config_disk.import_file.assert_called_once_with(instance, mock.ANY,
                                                        'disk.config')

    # spawn() with a plain image ref: the default root ('disk') and
    # ephemeral ('disk.local') backing files must be created.
    def test_spawn_without_image_meta(self):
        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        drvr.spawn(self.context, instance, image_meta, [], None)

        # We should have created a root disk and an ephemeral disk
        self.assertEqual(['disk', 'disk.local'],
                         sorted(fake_backend.created_disks.keys()))

    # Image.cache() must only be invoked for the root disk when the
    # instance actually boots from an image (non-empty image_ref and no
    # volume-backed root device).
    def test_spawn_from_volume_calls_cache(self):
        self.cache_called_for_disk = False

        def fake_none(*args, **kwargs):
            return

        def fake_cache(*args, **kwargs):
            # Record whether the root image was pulled into the cache.
            if kwargs.get('image_id') == 'my_fake_image':
                self.cache_called_for_disk = True

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        block_device_info = {'root_device_name': '/dev/vda',
                             'block_device_mapping': [
                                 {'mount_device': 'vda',
                                  'boot_index': 0}
                             ]
                             }
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)

        drvr.spawn(self.context, instance,
                   image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)

        # Booted from volume but with placeholder image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)

        drvr.spawn(self.context, instance,
                   image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)

        # Booted from an image
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)
        drvr.spawn(self.context, instance,
                   image_meta, [], None)
        self.assertTrue(self.cache_called_for_disk)

    # LXC guest booted from an iSCSI volume: the container must be set up
    # on the connected block device and the resulting rootfs device name
    # recorded in the instance's system_metadata.
    def test_start_lxc_from_volume(self):
        self.flags(virt_type="lxc", group='libvirt')

        def check_setup_container(image, container_dir=None):
            self.assertIsInstance(image, imgmodel.LocalBlockImage)
            self.assertEqual(image.path, '/dev/path/to/dev')
            return '/dev/nbd1'

        bdm = {
                  'guest_format': None,
                  'boot_index': 0,
                  'mount_device': '/dev/sda',
                  'connection_info': {
                      'driver_volume_type': 'iscsi',
                      'serial': 'afc1',
                      'data': {
                          'access_mode': 'rw',
                          'target_discovered': False,
                          'encrypted': False,
                          'qos_specs': None,
                          'target_iqn': 'iqn: volume-afc1',
                          'target_portal': 'ip: 3260',
                          'volume_id': 'afc1',
                          'target_lun': 1,
                          'auth_password': 'uj',
                          'auth_username': '47',
                          'auth_method': 'CHAP'
                      }
                  },
                  'disk_bus': 'scsi',
                  'device_type': 'disk',
                  'delete_on_termination': False
              }

        def _connect_volume_side_effect(connection_info, disk_info):
            # Simulate the volume driver filling in the attached device path.
            bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev'

        def _get(key, opt=None):
            return bdm.get(key, opt)

        def getitem(key):
            return bdm[key]

        def setitem(key, val):
            bdm[key] = val

        # MagicMock standing in for a BlockDeviceMapping object, backed by
        # the plain 'bdm' dict above.
        bdm_mock = mock.MagicMock()
        bdm_mock.__getitem__.side_effect = getitem
        bdm_mock.__setitem__.side_effect = setitem
        bdm_mock.get = _get

        disk_mock = mock.MagicMock()
        disk_mock.source_path = '/dev/path/to/dev'

        block_device_info = {'block_device_mapping': [bdm_mock],
                             'root_device_name': '/dev/sda'}

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/sda'
        instance_ref['ephemeral_gb'] = 0
        instance_ref['uuid'] = uuidutils.generate_uuid()
        inst_obj = objects.Instance(**instance_ref)
        image_meta = objects.ImageMeta.from_dict({})

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, '_connect_volume',
                              side_effect=_connect_volume_side_effect),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=disk_mock),
            mock.patch.object(drvr, 'get_info',
                              return_value=hardware.InstanceInfo(
                                  state=power_state.RUNNING)),
            mock.patch('nova.virt.disk.api.setup_container',
                       side_effect=check_setup_container),
            mock.patch('nova.virt.disk.api.teardown_container'),
            mock.patch.object(objects.Instance, 'save')):

            drvr.spawn(self.context, inst_obj, image_meta, [], None,
                       network_info=[],
                       block_device_info=block_device_info)
            self.assertEqual('/dev/nbd1', inst_obj.system_metadata.get(
                'rootfs_device_name'))

    # spawn() with PCI devices assigned: the devices are looked up through
    # the (mocked) libvirt node-device API.
    def test_spawn_with_pci_devices(self):
        def fake_none(*args, **kwargs):
            return None

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        class FakeLibvirtPciDevice(object):
            # NOTE: 'dettach' (sic) mirrors the libvirt API spelling.
            def dettach(self):
                return None
            def reset(self):
                return None

        def fake_node_device_lookup_by_name(address):
            # Only accept well-formed PCI addresses; anything else raises
            # as the real libvirt lookup would.
            pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
                       % dict(hex='[\da-f]', oct='[0-8]'))
            pattern = re.compile(pattern)
            if pattern.match(address) is None:
                raise fakelibvirt.libvirtError()
            return FakeLibvirtPciDevice()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        mock_connection = mock.MagicMock(
            nodeDeviceLookupByName=fake_node_device_lookup_by_name)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 'my_fake_image'
        instance = objects.Instance(**instance_ref)
        instance['pci_devices'] = objects.PciDeviceList(
            objects=[objects.PciDevice(address='0000:00:00.0')])

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.useFixture(fake_imagebackend.ImageBackendFixture())

        with mock.patch.object(drvr, '_get_connection',
                               return_value=mock_connection):
            drvr.spawn(self.context, instance, image_meta, [], None)

    # Common driver for the test_create_image_plain_* variants below:
    # runs _create_image for the given guest os_type and asserts the
    # expected root/ephemeral backing files (and sizes) were produced.
    def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
        gotFiles = []

        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        instance['os_type'] = os_type

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        if mkfs:
            # Give this os_type a dedicated mkfs command so the ephemeral
            # filename carries a command-hash suffix.
            self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                           {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        self.useFixture(
            fake_imagebackend.ImageBackendFixture(got_files=gotFiles))

        drvr._create_image(self.context, instance, disk_info['mapping'])
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(gotFiles, wantFiles)

    def test_create_image_plain_os_type_blank(self):
        self._test_create_image_plain(os_type='',
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)

    def test_create_image_plain_os_type_none(self):
        self._test_create_image_plain(os_type=None,
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)

    def test_create_image_plain_os_type_set_no_fs(self):
        self._test_create_image_plain(os_type='test',
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)

    def test_create_image_plain_os_type_set_with_fs(self):
        # Ephemeral filename gains a 7-char hash of the mkfs command.
        ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str(
            'mkfs.ext4 --label %(fs_label)s %(target)s')[:7])

        self._test_create_image_plain(os_type='test',
                                      filename=ephemeral_file_name,
                                      mkfs=True)

    # Instance with separate kernel/ramdisk images: those must be fetched
    # via fetch_raw_image with no size constraint.
    def test_create_image_initrd(self):
        kernel_id = uuids.kernel_id
        ramdisk_id = uuids.ramdisk_id

        kernel_fname = imagecache.get_cache_fname(kernel_id)
        ramdisk_fname = imagecache.get_cache_fname(ramdisk_id)

        filename = self._EPHEMERAL_20_DEFAULT

        gotFiles = []

        instance_ref = self.test_instance
        instance_ref['image_ref'] = uuids.instance_id
        instance_ref['kernel_id'] = uuids.kernel_id
        instance_ref['ramdisk_id'] = uuids.ramdisk_id
        instance_ref['os_type'] = 'test'
        instance = objects.Instance(**instance_ref)

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(got_files=gotFiles))

        with test.nested(
            mock.patch.object(driver, '_get_guest_xml'),
            mock.patch.object(driver, '_create_domain_and_network'),
            mock.patch.object(driver, 'get_info',
              return_value=[hardware.InstanceInfo(state=power_state.RUNNING)])
        ):
            image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                image_meta)
            driver._create_image(self.context, instance, disk_info['mapping'])

        # Assert that kernel and ramdisk were fetched with fetch_raw_image
        # and no size
        for name, disk in six.iteritems(fake_backend.disks):
            cache = disk.cache
            if name in ('kernel', 'ramdisk'):
                cache.assert_called_once_with(
                    context=self.context, filename=mock.ANY, image_id=mock.ANY,
                    fetch_func=fake_libvirt_utils.fetch_raw_image)

        wantFiles = [
            {'filename': kernel_fname,
             'size': None},
            {'filename': ramdisk_fname,
             'size': None},
            {'filename': imagecache.get_cache_fname(uuids.instance_id),
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(wantFiles, gotFiles)

    # Shared setup for _create_image/_create_configdrive tests; 'callback'
    # tweaks the instance dict before the Instance object is built.
    # Returns (gotFiles, imported_files) collected by the image backend
    # fixture.
    def _create_image_helper(self, callback, exists=None, suffix='',
                             test_create_configdrive=False):
        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        # NOTE(mikal): use this callback to tweak the instance to match
        # what you're trying to test
        callback(instance_ref)
        instance = objects.Instance(**instance_ref)
        # Turn on some swap to exercise that codepath in _create_image
        instance.flavor.swap = 500

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none)
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_none)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        gotFiles = []
        imported_files = []
        self.useFixture(fake_imagebackend.ImageBackendFixture(
            got_files=gotFiles, imported_files=imported_files,
            exists=exists))
        if test_create_configdrive:
            drvr._create_configdrive(self.context, instance)
        else:
            drvr._create_image(self.context, instance, disk_info['mapping'],
                               suffix=suffix)
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        return gotFiles, imported_files

    # Swap enabled via flavor metadata: a 'swap_500' backing file of
    # 500 MiB must be created alongside root and ephemeral disks.
    def test_create_image_with_swap(self):
        def enable_swap(instance_ref):
            # Turn on some swap to exercise that codepath in _create_image
            instance_ref['system_metadata']['instance_type_swap'] = 500

        gotFiles, _ = self._create_image_helper(enable_swap)

        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': self._EPHEMERAL_20_DEFAULT,
             'size': 20 * units.Gi},
            {'filename': 'swap_500',
             'size': 500 * units.Mi},
            ]
        self.assertEqual(gotFiles, wantFiles)

    @mock.patch(
        'nova.virt.libvirt.driver.LibvirtDriver._build_device_metadata',
        return_value=None)
    def test_create_configdrive(self, mock_save):
        def enable_configdrive(instance_ref):
            instance_ref['config_drive'] = 'true'

        # Ensure that we create a config drive and then import it into the
        # image backend store
        _, imported_files = self._create_image_helper(
            enable_configdrive, exists=lambda name: False,
            test_create_configdrive=True)

        self.assertTrue(imported_files[0][0].endswith('/disk.config'))
        self.assertEqual('disk.config', imported_files[0][1])

    # Image missing and no fallback host given: ImageNotFound propagates.
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
                       side_effect=exception.ImageNotFound(image_id='fake-id'))
    def test_create_image_not_exist_no_fallback(self, mock_cache):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        self.assertRaises(exception.ImageNotFound,
                          drvr._create_image,
                          self.context, instance, disk_info['mapping'])

    # Image missing but fallback_from_host given: the image must be copied
    # from the source host via libvirt_utils.copy_image.
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_not_exist_fallback(self, mock_cache):
        def side_effect(fetch_func, filename, size=None, *args, **kwargs):
            def second_call(fetch_func, filename, size=None, *args, **kwargs):
                # call copy_from_host ourselves because we mocked image.cache()
                fetch_func('fake-target')
                # further calls have no side effect
                mock_cache.side_effect = None
            mock_cache.side_effect = second_call
            # raise an error only the first call
            raise exception.ImageNotFound(image_id='fake-id')

        mock_cache.side_effect = side_effect
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'copy_image') as mock_copy:
            drvr._create_image(self.context, instance, disk_info['mapping'],
                               fallback_from_host='fake-source-host')
            mock_copy.assert_called_once_with(src='fake-target',
                                              dest='fake-target',
                                              host='fake-source-host',
                                              receive=True)

    # During RESIZE_FINISH, _create_image must snapshot the root disk.
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_resize_snap_backend(self, mock_cache):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance.task_state = task_states.RESIZE_FINISH
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        drvr._create_image(self.context, instance, disk_info['mapping'])

        # Assert we called create_snap on the root disk
        fake_backend.disks['disk'].create_snap.assert_called_once_with(
            libvirt_utils.RESIZE_SNAPSHOT_NAME)

    # An explicit specified_fs overrides the configured default format.
    @mock.patch.object(utils, 'execute')
    def test_create_ephemeral_specified_fs(self, mock_exec):
        self.flags(default_ephemeral_format='ext3')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True, specified_fs='ext4')

        mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
                                          'myVol', '/dev/something',
                                          run_as_root=True)

    # A bogus guest_format in the BDM must be rejected as InvalidBDMFormat.
    def test_create_ephemeral_specified_fs_not_valid(self):
        CONF.set_override('default_ephemeral_format', 'ext4')
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'guest_format': 'dummy',
                       'size': 1}]
        block_device_info = {
                'ephemerals': ephemerals}
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        disk_info['mapping'].pop('disk.local')

        with test.nested(
            mock.patch.object(utils, 'execute'),
            mock.patch.object(drvr, 'get_info'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(imagebackend.Image, 'verify_base_size'),
            mock.patch.object(imagebackend.Image, 'get_disk_size')):
            # NOTE(review): passes the module-level 'context' rather than
            # self.context here -- presumably intentional since only the
            # exception type is asserted; confirm.
            self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                              context, instance, disk_info['mapping'],
                              block_device_info=block_device_info)

    # No default_ephemeral_format override: mkfs falls back to ext4.
    def test_create_ephemeral_default(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    # Configured default_ephemeral_format drives the mkfs invocation.
    def test_create_ephemeral_with_conf(self):
        CONF.set_override('default_ephemeral_format', 'ext4')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    # Per-os_type _MKFS_COMMAND template takes precedence over defaults.
    def test_create_ephemeral_with_arbitrary(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    def test_create_ephemeral_with_ext3(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    # _create_swap runs mkswap without root privileges.
    def test_create_swap_default(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkswap', '/dev/something', run_as_root=False)
        self.mox.ReplayAll()

        drvr._create_swap('/dev/something', 1)

    # File-backed console: only the last MAX_CONSOLE_BYTES bytes of the
    # log are returned.
    def test_get_console_output_file(self):
        fake_libvirt_utils.files['console.log'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                # Truncate to 5 bytes so only the tail '67890' survives.
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                with mock.patch('os.path.exists', return_value=True):
                    output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual('67890', output)

    # Missing console log file yields empty output rather than an error.
    def test_get_console_output_file_missing(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_log = os.path.join(tmpdir, instance['name'],
                                       'non-existent.log')
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            with mock.patch('os.path.exists', return_value=False):
                output = drvr.get_console_output(self.context, instance)

            self.assertEqual('', output)

    # pty-backed console: output is flushed/appended via the (stubbed)
    # helpers and truncated to MAX_CONSOLE_BYTES.
    def test_get_console_output_pty(self):
        fake_libvirt_utils.files['pty'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            pty_file = '%s/fake_pty' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='pty'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % pty_file

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            def _fake_flush(self, fake_pty):
                return 'foo'

            def _fake_append_to_file(self, data, fpath):
                return 'pty'

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
            libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
            libvirt_driver.LibvirtDriver._append_to_file = \
                _fake_append_to_file

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual('67890', output)

    # Unknown console type ('foo') must raise ConsoleNotAvailable.
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_console_output_not_available(self, mock_get_xml,
                                              get_domain):
        xml = """
        <domain type='kvm'>
            <devices>
                <disk type='file'>
                    <source file='filename'/>
                </disk>
                <console type='foo'>
                    <source path='srcpath'/>
                    <target port='0'/>
                </console>
            </devices>
        </domain>
        """
        mock_get_xml.return_value = xml
        get_domain.return_value = mock.MagicMock()

        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.assertRaises(exception.ConsoleNotAvailable,
                          drvr.get_console_output, self.context, instance)

    def test_get_host_ip_addr(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ip = drvr.get_host_ip_addr()
        self.assertEqual(ip, CONF.my_ip)

    # my_ip not present on any local interface: a warning is logged.
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    @mock.patch('nova.compute.utils.get_machine_ips')
    def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
        mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.get_host_ip_addr()
        mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                         u'not found on any of the '
                                         u'interfaces: %(ifaces)s',
                                         {'ifaces': '8.8.8.8, 75.75.75.75',
                                          'my_ip': mock.ANY})

    # A failed initial connection during init_host must disable the
    # compute service via the connection-event handler.
    def test_conn_event_handler(self):
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            # verify that the driver registers for the close callback
            # and re-connects after receiving the callback
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.init_host,
                              "wibble")
            self.assertTrue(service_mock.disabled)

    # Connection drops after init_host: later driver calls must raise
    # HypervisorUnavailable and the service must be marked disabled.
    def test_command_with_broken_connection(self):
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock),
            mock.patch.object(host.Host, "get_capabilities")):

            drvr.init_host("wibble")
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.get_num_instances)
            self.assertTrue(service_mock.disabled)

    # A previously disabled service is re-enabled once the connection
    # comes back and a driver call succeeds.
    def test_service_resume_after_broken_connection(self):
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = True
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              return_value=mock.MagicMock()),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock),
            mock.patch.object(host.Host, "get_capabilities")):

            drvr.init_host("wibble")
            drvr.get_num_instances()
            self.assertTrue(not service_mock.disabled and
                            service_mock.disabled_reason is None)

    # destroy() on an instance the hypervisor no longer knows about still
    # deletes its files and saves the instance.
    @mock.patch.object(objects.Instance, 'save')
    def test_immediate_delete(self, mock_save):
        def fake_get_domain(instance):
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        def fake_delete_instance_files(instance):
            pass

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, {})
        mock_save.assert_called_once_with()

    # destroy() with attached volumes: exercised with and without a
    # volume-disconnect failure.
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    @mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
    @mock.patch.object(objects.Instance, 'save', autospec=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
    @mock.patch.object(driver, 'block_device_info_get_mapping')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
                                   mock_disconnect_volume,
                                   mock_delete_instance_files, mock_destroy,
                                   mock_inst_save, mock_inst_obj_load_attr,
                                   mock_get_by_uuid, volume_fail=False):
        instance = objects.Instance(self.context, **self.test_instance)
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

        mock_mapping.return_value = vol['block_device_mapping']
        mock_delete_instance_files.return_value = True
        mock_get_by_uuid.return_value = instance
        if volume_fail:
            # NOTE(review): assigns the exception instance to return_value,
            # which returns it instead of raising; side_effect would raise.
            # Confirm which behavior this test intends to exercise.
            mock_disconnect_volume.return_value = (
                exception.VolumeNotFound('vol'))

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.destroy(self.context, instance, [], vol)

    def test_destroy_removes_disk(self):
        self._test_destroy_removes_disk(volume_fail=False)

    def test_destroy_removes_disk_volume_fails(self):
        self._test_destroy_removes_disk(volume_fail=True)

    # destroy() with destroy_disks=False must not delete instance files.
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_destroy_not_removes_disk(self, mock_undefine_domain,
                                      mock_destroy, mock_unplug_vifs):
        instance = fake_instance.fake_instance_obj(
            None, name='instancename', id=1,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.destroy(self.context, instance, [], None, False)

    # LXC destroy: the container must be torn down after the domain is
    # destroyed, then cleanup() runs.
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                                  mock_teardown_container,
                                                  mock_cleanup):
        self.flags(virt_type='lxc', group='libvirt')
        fake_domain = FakeVirtDomain()

        def destroy_side_effect(*args, **kwargs):
            # Flip the fake domain to SHUTDOWN so destroy() sees it gone.
            fake_domain._info[0] = power_state.SHUTDOWN

        with mock.patch.object(fake_domain, 'destroy',
               side_effect=destroy_side_effect) as mock_domain_destroy:
            mock_get_domain.return_value = fake_domain

            instance = objects.Instance(**self.test_instance)
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            network_info = []
            drvr.destroy(self.context, instance, network_info, None, False)

            mock_get_domain.assert_has_calls([mock.call(instance),
                                              mock.call(instance)])
            mock_domain_destroy.assert_called_once_with()
            mock_teardown_container.assert_called_once_with(instance)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 network_info, None, False,
                                                 None)

    # LXC destroy when the domain has already vanished: teardown and
    # cleanup must still run.
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
            mock_get_domain, mock_teardown_container, mock_cleanup):
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
        mock_get_domain.side_effect = inf_exception

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)

    # Soft reboot where the domain comes back with a *different* libvirt
    # ID: the guest was recreated, so _create_domain must be called.
    def test_reboot_different_ids(self):
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_create_called = False

        # Mock domain
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple)
        mock_domain.ID().AndReturn('some_other_fake_id')
        mock_domain.ID().AndReturn('some_other_fake_id')

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock_domain

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, '_create_domain', fake_create_domain)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_create_called)

    # Soft reboot where the domain keeps the *same* libvirt ID: the soft
    # reboot did not take effect, so a hard reboot must be triggered.
    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                             mock_sleep, mock_loopingcall,
                             mock_get_instance_pci_devs):
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                         (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
        mock_domain.info.side_effect = return_values
        mock_domain.ID.return_value = 'some_fake_id'
        mock_domain.shutdown.side_effect = mock.Mock()

        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_hard_reboot.side_effect = fake_hard_reboot
        mock_loopingcall.return_value = FakeLoopingCall()
        mock_get_instance_pci_devs.return_value = []
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                           mock_hard_reboot):
        # Tests that a hard reboot is performed when a soft reboot results
        # in raising a libvirtError.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')

        # setup mocks
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = (
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_virDomain.ID.return_value = 'some_fake_id'
        mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        context = None
        instance = objects.Instance(**self.test_instance)
        network_info = []
        mock_get_domain.return_value = mock_virDomain

        drvr.reboot(context, instance, network_info, 'SOFT')

    # Common driver for the resume_state_on_host_boot tests: a hard
    # reboot is expected only for states outside ignored_states.
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_resume_state_on_host_boot_with_state(self, state,
                                                   mock_get_domain,
                                                   mock_hard_reboot):
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = ([state, None, None, None, None])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = mock_virDomain
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)

        drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                       block_device_info=None)

        ignored_states = (power_state.RUNNING,
                          power_state.SUSPENDED,
                          power_state.NOSTATE,
                          power_state.PAUSED)
        self.assertEqual(mock_hard_reboot.called, state not in ignored_states)

    def test_resume_state_on_host_boot_with_running_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)

    def test_resume_state_on_host_boot_with_suspended_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)

    def test_resume_state_on_host_boot_with_paused_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)

    def test_resume_state_on_host_boot_with_nostate(self):
        self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)

    def test_resume_state_on_host_boot_with_shutdown_state(self):
        # NOTE(review): this is the body of
        # test_resume_state_on_host_boot_with_shutdown_state, yet it passes
        # power_state.RUNNING rather than power_state.SHUTDOWN — looks like a
        # copy/paste defect; confirm intent before changing.
        self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)

    def test_resume_state_on_host_boot_with_crashed_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
            self, mock_get_domain, mock_hard_reboot):
        # A missing domain should be treated like a dead instance and be
        # hard-rebooted on host boot.
        instance = objects.Instance(**self.test_instance)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.side_effect = exception.InstanceNotFound(
            instance_id='fake')
        drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
                                       block_device_info=None)

        mock_hard_reboot.assert_called_once_with(self.context,
                                                 instance, [], None)

    @mock.patch('nova.virt.libvirt.LibvirtDriver._undefine_domain')
    @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot(self, mock_destroy, mock_get_disk_info,
                         mock_get_instance_disk_info, mock_get_guest_xml,
                         mock_create_images_and_backing,
                         mock_create_domain_and_network, mock_get_info,
                         mock_undefine):
        # Verifies the _hard_reboot sequence: destroy, undefine, recreate
        # images/backing files, then recreate the domain — and that each step
        # receives the *correct* of the two different disk_info values.
        self.context.auth_token = True  # any non-None value will suffice
        instance = objects.Instance(**self.test_instance)
        instance_path = libvirt_utils.get_instance_path(instance)
        network_info = _fake_network_info(self, 1)
        block_device_info = None

        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # get_info is polled until the instance is RUNNING again.
        return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                         hardware.InstanceInfo(state=power_state.RUNNING)]
        mock_get_info.side_effect = return_values

        backing_disk_info = [{"virt_disk_size": 2}]

        mock_get_disk_info.return_value = mock.sentinel.disk_info
        mock_get_guest_xml.return_value = dummyxml
        mock_get_instance_disk_info.return_value = backing_disk_info

        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        mock_destroy.assert_called_once_with(instance)
        mock_undefine.assert_called_once_with(instance)

        # make sure that _create_images_and_backing is passed the disk_info
        # returned from _get_instance_disk_info and not the one that is in
        # scope from blockinfo.get_disk_info
        mock_create_images_and_backing.assert_called_once_with(self.context,
            instance, instance_path, backing_disk_info)

        # make sure that _create_domain_and_network is passed the disk_info
        # returned from blockinfo.get_disk_info and not the one that's
        # returned from _get_instance_disk_info
        mock_create_domain_and_network.assert_called_once_with(self.context,
            dummyxml, instance, network_info, mock.sentinel.disk_info,
            block_device_info=block_device_info,
            reboot=True, vifs_already_plugged=True)

    # NOTE(review): decorator list continues on the next chunk line; it
    # belongs to test_hard_reboot_does_not_call_glance_show.
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
    @mock.patch('nova.pci.manager.get_instance_pci_devs')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot_does_not_call_glance_show(self,
            mock_destroy, mock_get_disk_info, mock_get_guest_config,
            mock_get_instance_path, mock_write_to_file,
            mock_get_instance_disk_info, mock_create_images_and_backing,
            mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
            mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
        """For a hard reboot, we shouldn't need an additional call to glance
        to get the image metadata.

        This is important for automatically spinning up instances on a
        host-reboot, since we won't have a user request context that'll allow
        the Glance request to go through. We have to rely on the cached image
        metadata, instead.

        https://bugs.launchpad.net/nova/+bug/1339386
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        network_info = mock.MagicMock()
        block_device_info = mock.MagicMock()
        mock_get_disk_info.return_value = {}
        mock_get_guest_config.return_value = mock.MagicMock()
        mock_get_instance_path.return_value = '/foo'
        mock_looping_call.return_value = mock.MagicMock()
        drvr._image_api = mock.MagicMock()

        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        # The image API must never have been hit during the reboot.
        self.assertFalse(drvr._image_api.get.called)
        mock_ensure_tree.assert_called_once_with('/foo')

    def test_suspend(self):
        # suspend() must managed-save the domain and must NOT enumerate the
        # instance's disks when there is no ephemeral encryption key
        # (ephemeral_key_uuid is None), hence the empty delete_volume calls.
        guest = libvirt_guest.Guest(FakeVirtDomain(id=1))
        dom = guest._domain
        instance = objects.Instance(**self.test_instance)
        instance.ephemeral_key_uuid = None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        @mock.patch.object(dmcrypt, 'delete_volume')
        @mock.patch.object(conn, '_get_instance_disk_info', return_value=[])
        @mock.patch.object(conn, '_detach_sriov_ports')
        @mock.patch.object(conn, '_detach_pci_devices')
        @mock.patch.object(pci_manager, 'get_instance_pci_devs',
                           return_value='pci devs')
        @mock.patch.object(conn._host, 'get_guest', return_value=guest)
        def suspend(mock_get_guest, mock_get_instance_pci_devs,
                    mock_detach_pci_devices, mock_detach_sriov_ports,
                    mock_get_instance_disk_info,
                    mock_delete_volume):
            mock_managedSave = mock.Mock()
            dom.managedSave = mock_managedSave

            conn.suspend(self.context, instance)

            mock_managedSave.assert_called_once_with(0)
            self.assertFalse(mock_get_instance_disk_info.called)
            # return_value is [], so this asserts an empty call list.
            mock_delete_volume.assert_has_calls([mock.call(disk['path'])
                for disk in mock_get_instance_disk_info.return_value], False)

        suspend()

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                             mock_sleep, seconds_to_shutdown,
                             timeout, retry_interval,
                             shutdown_attempts, succeeds):
        # Shared helper: scripts the per-second domain.info() responses so
        # that the guest appears to shut down after `seconds_to_shutdown`,
        # then checks _clean_shutdown's result and how many shutdown()
        # attempts it made given `timeout`/`retry_interval`.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        shutdown_count = []

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
        return_shutdowns = [shutdown_count.append("shutdown")]
        retry_countdown = retry_interval
        for x in range(min(seconds_to_shutdown, timeout)):
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
            if retry_countdown == 0:
                return_shutdowns.append(shutdown_count.append("shutdown"))
                retry_countdown = retry_interval
            else:
                retry_countdown -= 1

        if seconds_to_shutdown < timeout:
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

        mock_domain.info.side_effect = return_infos
        mock_domain.shutdown.side_effect = return_shutdowns

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_create_domain.side_effect = fake_create_domain
        result = drvr._clean_shutdown(instance, timeout, retry_interval)

        self.assertEqual(succeeds, result)
        self.assertEqual(shutdown_attempts, len(shutdown_count))

    def
        test_clean_shutdown_first_time(self):
        # Guest shuts down within the timeout on the first attempt.
        self._test_clean_shutdown(seconds_to_shutdown=2,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=True)

    def test_clean_shutdown_with_retry(self):
        # Guest needs a second shutdown() call before it goes down.
        self._test_clean_shutdown(seconds_to_shutdown=4,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=True)

    def test_clean_shutdown_failure(self):
        # Guest never shuts down within the timeout.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=False)

    def test_clean_shutdown_no_wait(self):
        # Zero timeout: a single attempt, no waiting.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=0,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=False)

    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_ports(self,
                                mock_get_image_metadata,
                                mock_ID,
                                mock_attachDevice):
        # A VNIC_TYPE_DIRECT vif must be attached as a host device.
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        drvr._attach_sriov_ports(self.context, instance, guest, network_info)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)

    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_direct_physical_ports(self,
                                mock_get_image_metadata,
                                mock_ID,
                                mock_attachDevice):
        # Same as above but for VNIC_TYPE_DIRECT_PHYSICAL vifs.
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        drvr._attach_sriov_ports(self.context, instance, guest, network_info)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)

    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_ports_with_info_cache(self,
                                                mock_get_image_metadata,
                                                mock_ID,
                                                mock_attachDevice):
        # When network_info is None the driver must fall back to the
        # instance's info_cache.
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        drvr._attach_sriov_ports(self.context, instance, guest, None)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def _test_detach_sriov_ports(self, mock_has_min_version, vif_type):
        # Shared helper, parameterized by vif_type, checking that the PCI
        # device backing an SR-IOV vif is handed to _detach_pci_devices.
        # NOTE(review): 'expeted_pci_slot' is a long-standing typo for
        # 'expected_pci_slot' (local name only).
        instance = objects.Instance(**self.test_instance)

        expeted_pci_slot = "0000:00:00.0"
        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        # some more adjustments for the fake network_info so that
        # the correct get_config function will be executed (vif's
        # get_config_hw_veb - which is according to the real SRIOV vif)
        # and most importantly the pci_slot which is translated to
        # cfg.source_dev, then to PciDevice.address and sent to
        # _detach_pci_devices
        network_info[0]['profile'] = dict(pci_slot=expeted_pci_slot)
        network_info[0]['type'] = vif_type
        network_info[0]['details'] = dict(vlan="2145")
        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        # fill the pci_devices of the instance so that
        # pci_manager.get_instance_pci_devs will not return an empty list
        # which will eventually fail the assertion for detachDeviceFlags
        expected_pci_device_obj = (
            objects.PciDevice(address=expeted_pci_slot, request_id=None))
        instance.pci_devices = objects.PciDeviceList()
        instance.pci_devices.objects = [expected_pci_device_obj]

        domain = FakeVirtDomain()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest(domain)

        with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
            drvr._detach_sriov_ports(self.context, instance, guest)
            mock_detach_pci.assert_called_once_with(
                guest, [expected_pci_device_obj])

    def test_detach_sriov_ports_interface_interface_hostdev(self):
        # Note: test detach_sriov_ports method for vif with config
        # LibvirtConfigGuestInterface
        self._test_detach_sriov_ports(vif_type="hw_veb")

    def test_detach_sriov_ports_interface_pci_hostdev(self):
        # Note: test detach_sriov_ports method for vif with config
        # LibvirtConfigGuestHostdevPCI
        self._test_detach_sriov_ports(vif_type="ib_hostdev")

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
    def test_detach_duplicate_mac_sriov_ports(self,
                                              mock_detachDeviceFlags,
                                              mock_has_min_version):
        # Two SR-IOV vifs sharing a MAC must each be detached by their own
        # PCI slot, not collapsed into one detach.
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 2)

        for network_info_inst in network_info:
            network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
            network_info_inst['type'] = "hw_veb"
            network_info_inst['details'] = dict(vlan="2145")
            network_info_inst['address'] = "fa:16:3e:96:2a:48"

        network_info[0]['profile'] = dict(pci_slot="0000:00:00.0")
        network_info[1]['profile'] = dict(pci_slot="0000:00:00.1")

        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        # fill the pci_devices of the instance so that
        # pci_manager.get_instance_pci_devs will not return an empty list
        # which will eventually fail the assertion for detachDeviceFlags
        instance.pci_devices = objects.PciDeviceList()
        instance.pci_devices.objects = [
            objects.PciDevice(address='0000:00:00.0', request_id=None),
            objects.PciDevice(address='0000:00:00.1', request_id=None)
        ]

        domain = FakeVirtDomain()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest(domain)

        drvr._detach_sriov_ports(self.context, instance, guest)

        expected_xml = [
            ('<hostdev mode="subsystem" type="pci" managed="yes">\n'
             '  <source>\n'
             '    <address bus="0x00" domain="0x0000" \
function="0x0" slot="0x00"/>\n'
             '  </source>\n'
             '</hostdev>\n'),
            ('<hostdev mode="subsystem" type="pci" managed="yes">\n'
             '  <source>\n'
             '    <address bus="0x00" domain="0x0000" \
function="0x1" slot="0x00"/>\n'
             '  </source>\n'
             '</hostdev>\n')
        ]

        # NOTE(review): Mock.has_calls is NOT an assertion method — this
        # line just creates a child mock attribute and always "passes".
        # It almost certainly should be assert_has_calls; fix separately.
        mock_detachDeviceFlags.has_calls([
            mock.call(expected_xml[0], flags=1),
            mock.call(expected_xml[1], flags=1)
        ])

    def test_resume(self):
        # resume() must rebuild the domain from the saved XML and re-attach
        # the instance's PCI devices to the new guest.
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        block_device_info = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest('fake_dom')
        with test.nested(
            mock.patch.object(drvr, '_get_existing_domain_xml',
                              return_value=dummyxml),
            mock.patch.object(drvr, '_create_domain_and_network',
                              return_value=guest),
            mock.patch.object(drvr, '_attach_pci_devices'),
            mock.patch.object(pci_manager, 'get_instance_pci_devs',
                              return_value='fake_pci_devs'),
            mock.patch.object(utils, 'get_image_from_system_metadata'),
            mock.patch.object(blockinfo, 'get_disk_info'),
        ) as (_get_existing_domain_xml, _create_domain_and_network,
              _attach_pci_devices, get_instance_pci_devs,
              get_image_metadata, get_disk_info):
            get_image_metadata.return_value = {'bar': 234}
            disk_info = {'foo': 123}
            get_disk_info.return_value = disk_info
            drvr.resume(self.context, instance,
                        network_info, block_device_info)
            _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                            network_info, block_device_info)])
            _create_domain_and_network.assert_has_calls([mock.call(
                                        self.context, dummyxml,
                                        instance, network_info, disk_info,
                                        block_device_info=block_device_info,
                                        vifs_already_plugged=True)])
            _attach_pci_devices.assert_has_calls([mock.call(guest,
                                                 'fake_pci_devs')])

    @mock.patch.object(host.Host, 'get_domain')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
                               mock_get_info, mock_get_domain):
        # destroy() should undefine the domain and save the instance.
        dom_mock = mock.MagicMock()
        dom_mock.undefineFlags.return_value = 1

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = dom_mock
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN, id=-1)
        mock_delete_instance_files.return_value = None

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()

    @mock.patch.object(rbd_utils, 'RBDDriver')
    def test_cleanup_rbd(self, mock_driver):
        # _cleanup_rbd delegates straight to RBDDriver.cleanup_volumes.
        driver = mock_driver.return_value
        driver.cleanup_volumes = mock.Mock()
        fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._cleanup_rbd(fake_instance)

        driver.cleanup_volumes.assert_called_once_with(fake_instance)

    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_undefine_flags(self, mock_save):
        # If undefineFlags() raises libvirtError, destroy() must fall back
        # to the plain undefine() call.
        # NOTE: 'mock' here deliberately shadows the mock module inside this
        # method — a mox-era idiom kept as-is.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err'))
        mock.ID().AndReturn(123)
        mock.undefine()

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock

        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)

        def fake_delete_instance_files(instance):
            return None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
        # Old libvirt without undefineFlags: fall back to removing the
        # managed-save image and then undefining.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndReturn(True)
        mock.managedSaveRemove(0)
        mock.undefine()

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock

        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)

        def fake_delete_instance_files(instance):
            return None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
        # Old libvirt without undefineFlags nor hasManagedSaveImage: plain
        # undefine() is the last resort.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndRaise(AttributeError())
        mock.undefine()

        self.mox.ReplayAll()

        # (self, instance) because this stubs the unbound host.Host method.
        def fake_get_domain(self, instance):
            return mock

        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_delete_instance_files(instance):
            return None

        drvr =
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()

    def test_destroy_timed_out(self):
        # A libvirt operation-timeout during destroy must surface as
        # InstancePowerOffFailure.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out"))
        self.mox.ReplayAll()

        def fake_get_domain(self, instance):
            return mock

        def fake_get_error_code(self):
            return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code',
                       fake_get_error_code)
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstancePowerOffFailure,
                          drvr.destroy, self.context, instance, [])

    def test_private_destroy_not_found(self):
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                "No such domain",
                error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(ex)
        mock.info().AndRaise(ex)
        mock.UUIDString()
        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        instance = objects.Instance(**self.test_instance)
        # NOTE(vish): verifies destroy doesn't raise if the instance disappears
        drvr._destroy(instance)

    def test_private_destroy_lxc_processes_refused_to_die(self):
        # For LXC, libvirt's "Some processes refused to die" error is
        # tolerated as long as the guest ends up SHUTDOWN.
        self.flags(virt_type='lxc', group='libvirt')
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError, "",
                error_message="internal error: Some processes refused to die",
                error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \
             mock.patch.object(conn, 'get_info') as mock_get_info:
            mock_domain = mock.MagicMock()
            mock_domain.ID.return_value = 1
            mock_get_domain.return_value = mock_domain
            mock_domain.destroy.side_effect = ex

            mock_info = mock.MagicMock()
            mock_info.id = 1
            mock_info.state = power_state.SHUTDOWN
            mock_get_info.return_value = mock_info

            instance = objects.Instance(**self.test_instance)
            conn._destroy(instance)

    def test_private_destroy_processes_refused_to_die_still_raises(self):
        # Outside LXC the same libvirt error must still propagate.
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError, "",
                error_message="internal error: Some processes refused to die",
                error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(conn._host, 'get_domain') as mock_get_domain:
            mock_domain = mock.MagicMock()
            mock_domain.ID.return_value = 1
            mock_get_domain.return_value = mock_domain
            mock_domain.destroy.side_effect = ex

            instance = objects.Instance(**self.test_instance)
            self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
                              instance)

    def test_private_destroy_ebusy_timeout(self):
        # Tests that _destroy will retry 3 times to destroy the guest when an
        # EBUSY is raised, but eventually times out and raises the libvirtError
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                ("Failed to terminate process 26425 with SIGKILL: "
                 "Device or resource busy"),
                error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                int1=errno.EBUSY)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=ex)

        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
                              instance)
        self.assertEqual(3, mock_guest.poweroff.call_count)

    def test_private_destroy_ebusy_multiple_attempt_ok(self):
        # Tests that the _destroy attempt loop is
        # broken when EBUSY is no longer raised.
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                ("Failed to terminate process 26425 with SIGKILL: "
                 "Device or resource busy"),
                error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                int1=errno.EBUSY)

        # First poweroff raises EBUSY, second succeeds.
        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=[ex, None])

        inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1)
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            with mock.patch.object(drvr, 'get_info', return_value=inst_info):
                drvr._destroy(instance)

        self.assertEqual(2, mock_guest.poweroff.call_count)

    def test_undefine_domain_with_not_found_instance(self):
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code")

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        # NOTE(wenjianhn): verifies undefine doesn't raise if the
        # instance disappears
        drvr._undefine_domain(instance)

    @mock.patch.object(host.Host, "list_instance_domains")
    @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
    @mock.patch.object(objects.InstanceList, "get_by_filters")
    def test_disk_over_committed_size_total(self, mock_get, mock_bdms,
                                            mock_list):
        # NOTE(review): the comment below is a stale copy/paste from a
        # destroy test — this actually verifies that the over-committed disk
        # sizes of all domains are summed.
        # Ensure destroy calls managedSaveRemove for saved instance.
        class DiagFakeDomain(object):
            def __init__(self, name):
                self._name = name
                self._uuid = str(uuid.uuid4())

            def ID(self):
                return 1

            def name(self):
                return self._name

            def UUIDString(self):
                return self._uuid

            def XMLDesc(self, flags):
                return "<domain/>"

        instance_domains = [
            DiagFakeDomain("instance0000001"),
            DiagFakeDomain("instance0000002")]
        mock_list.return_value = instance_domains

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_disks = {'instance0000001':
                          [{'type': 'qcow2', 'path': '/somepath/disk1',
                            'virt_disk_size': '10737418240',
                            'backing_file': '/somepath/disk1',
                            'disk_size': '83886080',
                            'over_committed_disk_size': '10653532160'}],
                      'instance0000002':
                          [{'type': 'raw', 'path': '/somepath/disk2',
                            'virt_disk_size': '0',
                            'backing_file': '/somepath/disk2',
                            'disk_size': '10737418240',
                            'over_committed_disk_size': '0'}]}

        def get_info(instance_name, xml, **kwargs):
            return fake_disks.get(instance_name)

        instance_uuids = [dom.UUIDString() for dom in instance_domains]
        instances = [objects.Instance(
            uuid=instance_uuids[0],
            root_device_name='/dev/vda'),
            objects.Instance(
            uuid=instance_uuids[1],
            root_device_name='/dev/vdb')
        ]
        mock_get.return_value = instances

        with mock.patch.object(drvr,
                               "_get_instance_disk_info") as mock_info:
            mock_info.side_effect = get_info

            result = drvr._get_disk_over_committed_size_total()
            self.assertEqual(result, 10653532160)
            mock_list.assert_called_once_with()
            self.assertEqual(2, mock_info.call_count)

        filters = {'uuid': instance_uuids}
        mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
        mock_bdms.assert_called_with(mock.ANY, instance_uuids)

    @mock.patch.object(host.Host, "list_instance_domains")
    @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
    @mock.patch.object(objects.InstanceList, "get_by_filters")
    def test_disk_over_committed_size_total_eperm(self, mock_get, mock_bdms,
                                                  mock_list):
        # NOTE(review): stale copy/paste comment again (see above); this test
        # checks that per-instance OSError failures (ENOENT/ESTALE/EACCES)
        # are skipped while the remaining instances are still summed.
        # Ensure destroy calls managedSaveRemove for saved instance.
        # Minimal stand-in for a libvirt domain; only the accessors used
        # by _get_disk_over_committed_size_total are implemented.
        class DiagFakeDomain(object):
            def __init__(self, name):
                self._name = name
                self._uuid = str(uuid.uuid4())

            def ID(self):
                return 1

            def name(self):
                return self._name

            def UUIDString(self):
                return self._uuid

            def XMLDesc(self, flags):
                return "<domain/>"

        instance_domains = [
            DiagFakeDomain("instance0000001"),
            DiagFakeDomain("instance0000002"),
            DiagFakeDomain("instance0000003"),
            DiagFakeDomain("instance0000004")]
        mock_list.return_value = instance_domains

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_disks = {'instance0000001':
                      [{'type': 'qcow2', 'path': '/somepath/disk1',
                        'virt_disk_size': '10737418240',
                        'backing_file': '/somepath/disk1',
                        'disk_size': '83886080',
                        'over_committed_disk_size': '10653532160'}],
                      'instance0000002':
                      [{'type': 'raw', 'path': '/somepath/disk2',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk2',
                        'disk_size': '10737418240',
                        'over_committed_disk_size': '21474836480'}],
                      'instance0000003':
                      [{'type': 'raw', 'path': '/somepath/disk3',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk3',
                        'disk_size': '21474836480',
                        'over_committed_disk_size': '32212254720'}],
                      'instance0000004':
                      [{'type': 'raw', 'path': '/somepath/disk4',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk4',
                        'disk_size': '32212254720',
                        'over_committed_disk_size': '42949672960'}]}

        # First three instances fail with ENOENT/ESTALE/EACCES; those
        # errors must be tolerated so only the fourth counts.
        def side_effect(name, dom, block_device_info):
            if name == 'instance0000001':
                self.assertEqual('/dev/vda',
                                 block_device_info['root_device_name'])
                raise OSError(errno.ENOENT, 'No such file or directory')
            if name == 'instance0000002':
                self.assertEqual('/dev/vdb',
                                 block_device_info['root_device_name'])
                raise OSError(errno.ESTALE, 'Stale NFS file handle')
            if name == 'instance0000003':
                self.assertEqual('/dev/vdc',
                                 block_device_info['root_device_name'])
                raise OSError(errno.EACCES, 'Permission denied')
            if name == 'instance0000004':
                self.assertEqual('/dev/vdd',
                                 block_device_info['root_device_name'])
            return fake_disks.get(name)

        get_disk_info = mock.Mock()
        get_disk_info.side_effect = side_effect
        drvr._get_instance_disk_info = get_disk_info

        instance_uuids = [dom.UUIDString() for dom in instance_domains]
        instances = [objects.Instance(
            uuid=instance_uuids[0],
            root_device_name='/dev/vda'),
            objects.Instance(
            uuid=instance_uuids[1],
            root_device_name='/dev/vdb'),
            objects.Instance(
            uuid=instance_uuids[2],
            root_device_name='/dev/vdc'),
            objects.Instance(
            uuid=instance_uuids[3],
            root_device_name='/dev/vdd')
        ]
        mock_get.return_value = instances

        result = drvr._get_disk_over_committed_size_total()
        # Only instance0000004 succeeds, so only its value is summed.
        self.assertEqual(42949672960, result)
        mock_list.assert_called_once_with()
        self.assertEqual(4, get_disk_info.call_count)
        filters = {'uuid': instance_uuids}
        mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
        mock_bdms.assert_called_with(mock.ANY, instance_uuids)

    @mock.patch.object(host.Host, "list_instance_domains",
                       return_value=[mock.MagicMock(name='foo')])
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
                       side_effect=exception.VolumeBDMPathNotFound(path='bar'))
    @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
    @mock.patch.object(objects.InstanceList, "get_by_filters")
    def test_disk_over_committed_size_total_bdm_not_found(self,
                                                          mock_get,
                                                          mock_bdms,
                                                          mock_get_disk_info,
                                                          mock_list_domains):
        # Tests that we handle VolumeBDMPathNotFound gracefully.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # VolumeBDMPathNotFound is swallowed, so the total stays zero.
        self.assertEqual(0, drvr._get_disk_over_committed_size_total())

    def test_cpu_info(self):
        # _get_cpu_info must reflect the host capabilities reported by
        # the (stubbed) Host.get_capabilities call.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        def get_host_capabilities_stub(self):
            cpu = vconfig.LibvirtConfigCPU()
            cpu.model = "Opteron_G4"
            cpu.vendor = "AMD"
            cpu.arch = arch.X86_64

            cpu.cells = 1
            cpu.cores = 2
            cpu.threads = 1
            cpu.sockets = 4

            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu

            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = arch.X86_64
            guest.domtype = ["kvm"]
            caps.guests.append(guest)

            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = arch.I686
            guest.domtype = ["kvm"]
            caps.guests.append(guest)

            return caps

        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)

        want = {"vendor": "AMD",
                "features": set(["extapic", "3dnow"]),
                "model": "Opteron_G4",
                "arch": arch.X86_64,
                "topology": {"cells": 1, "cores": 2, "threads": 1,
                             "sockets": 4}}
        got = drvr._get_cpu_info()
        self.assertEqual(want, got)

    def test_get_pcidev_info(self):
        # Exercises PCI device introspection for PFs, VFs and plain
        # devices, before and at MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION.
        def fake_nodeDeviceLookupByName(self, name):
            return FakeNodeDevice(_fake_NodeDevXml[name])

        self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
        host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(
                fakelibvirt.Connection, 'getLibVersion') as mock_lib_version:
            # Pretend the libvirt version is just below the PF/no-VFs
            # capability threshold.
            mock_lib_version.return_value = (
                versionutils.convert_version_to_int(
                    libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION)
                - 1)

            actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
            expect_vf = {
                "dev_id": "pci_0000_04_00_3",
                "address": "0000:04:00.3",
                "product_id": '1521',
                "numa_node": None,
                "vendor_id": '8086',
                "label": 'label_8086_1521',
                "dev_type":
                    fields.PciDeviceType.SRIOV_PF,
                }
            self.assertEqual(expect_vf, actualvf)

            actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
            expect_vf = {
                "dev_id": "pci_0000_04_10_7",
                "address": "0000:04:10.7",
                "product_id": '1520',
                "numa_node": None,
                "vendor_id": '8086',
                "label": 'label_8086_1520',
                "dev_type": fields.PciDeviceType.SRIOV_VF,
                "parent_addr": '0000:04:00.3',
            }
            self.assertEqual(expect_vf, actualvf)

            actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
            expect_vf = {
                "dev_id": "pci_0000_04_11_7",
                "address": "0000:04:11.7",
                "product_id": '1520',
                "vendor_id": '8086',
                "numa_node": 0,
                "label": 'label_8086_1520',
                "dev_type": fields.PciDeviceType.SRIOV_VF,
                "parent_addr": '0000:04:00.3',
            }
            self.assertEqual(expect_vf, actualvf)

            # Same device is a PF or a STANDARD device depending on the
            # is_physical_function probe.
            with mock.patch.object(pci_utils, 'is_physical_function',
                                   return_value=True):
                actualvf = drvr._get_pcidev_info("pci_0000_04_00_1")
                expect_vf = {
                    "dev_id": "pci_0000_04_00_1",
                    "address": "0000:04:00.1",
                    "product_id": '1013',
                    "numa_node": 0,
                    "vendor_id": '15b3',
                    "label": 'label_15b3_1013',
                    "dev_type": fields.PciDeviceType.SRIOV_PF,
                }
                self.assertEqual(expect_vf, actualvf)

            with mock.patch.object(pci_utils, 'is_physical_function',
                                   return_value=False):
                actualvf = drvr._get_pcidev_info("pci_0000_04_00_1")
                expect_vf = {
                    "dev_id": "pci_0000_04_00_1",
                    "address": "0000:04:00.1",
                    "product_id": '1013',
                    "numa_node": 0,
                    "vendor_id": '15b3',
                    "label": 'label_15b3_1013',
                    "dev_type": fields.PciDeviceType.STANDARD,
                }
                self.assertEqual(expect_vf, actualvf)

        with mock.patch.object(
                fakelibvirt.Connection, 'getLibVersion') as mock_lib_version:
            # Now at exactly the PF/no-VFs capability version.
            mock_lib_version.return_value = (
                versionutils.convert_version_to_int(
                    libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION))
            actualvf = drvr._get_pcidev_info("pci_0000_03_00_0")
            expect_vf = {
                "dev_id": "pci_0000_03_00_0",
                "address": "0000:03:00.0",
                "product_id": '1013',
                "numa_node": 0,
                "vendor_id": '15b3',
                "label": 'label_15b3_1013',
                "dev_type": fields.PciDeviceType.SRIOV_PF,
            }
            self.assertEqual(expect_vf, actualvf)

            actualvf = drvr._get_pcidev_info("pci_0000_03_00_1")
            expect_vf = {
                "dev_id": "pci_0000_03_00_1",
                "address": "0000:03:00.1",
                "product_id": '1013',
                "numa_node": 0,
                "vendor_id": '15b3',
                "label": 'label_15b3_1013',
                "dev_type": fields.PciDeviceType.SRIOV_PF,
            }
            self.assertEqual(expect_vf, actualvf)

    def test_list_devices_not_supported(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Handle just the NO_SUPPORT error
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)

        with mock.patch.object(drvr._conn, 'listDevices',
                               side_effect=not_supported_exc):
            self.assertEqual('[]', drvr._get_pci_passthrough_devices())

        # We cache not supported status to avoid emitting too many logging
        # messages. Clear this value to test the other exception case.
        del drvr._list_devices_supported

        # Other errors should not be caught
        other_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'other exc',
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)

        with mock.patch.object(drvr._conn, 'listDevices',
                               side_effect=other_exc):
            self.assertRaises(fakelibvirt.libvirtError,
                              drvr._get_pci_passthrough_devices)

    def test_get_pci_passthrough_devices(self):
        # _get_pci_passthrough_devices must serialize the host PCI
        # devices (one PF, two VFs) to JSON.
        def fakelistDevices(caps, fakeargs=0):
            return ['pci_0000_04_00_3', 'pci_0000_04_10_7',
                    'pci_0000_04_11_7']

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices

        def fake_nodeDeviceLookupByName(self, name):
            return FakeNodeDevice(_fake_NodeDevXml[name])

        self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
        host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actjson = drvr._get_pci_passthrough_devices()

        expectvfs = [
            {
                "dev_id": "pci_0000_04_00_3",
                "address": "0000:04:00.3",
                "product_id": '1521',
                "vendor_id": '8086',
                "dev_type":
                    fields.PciDeviceType.SRIOV_PF,
                "phys_function": None,
                "numa_node": None},
            {
                "dev_id": "pci_0000_04_10_7",
                "domain": 0,
                "address": "0000:04:10.7",
                "product_id": '1520',
                "vendor_id": '8086',
                "numa_node": None,
                "dev_type": fields.PciDeviceType.SRIOV_VF,
                "phys_function": [('0x0000', '0x04', '0x00', '0x3')]},
            {
                "dev_id": "pci_0000_04_11_7",
                "domain": 0,
                "address": "0000:04:11.7",
                "product_id": '1520',
                "vendor_id": '8086',
                "numa_node": 0,
                "dev_type": fields.PciDeviceType.SRIOV_VF,
                "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
            }
        ]

        actualvfs = jsonutils.loads(actjson)
        # phys_function/virt_functions/label serialization is not
        # compared here; all other keys must match exactly.
        for dev in range(len(actualvfs)):
            for key in actualvfs[dev].keys():
                if key not in ['phys_function', 'virt_functions', 'label']:
                    self.assertEqual(expectvfs[dev][key],
                                     actualvfs[dev][key])

    def _fake_caps_numa_topology(self,
                                 cells_per_host=4,
                                 sockets_per_cell=1,
                                 cores_per_socket=1,
                                 threads_per_core=2,
                                 kb_mem=1048576):
        # Build a fake libvirt capabilities NUMA topology; each cell i
        # gets 1024*i small (4K) pages and i large (2048K) pages.

        # Generate mempages list per cell
        cell_mempages = list()
        for cellid in range(cells_per_host):
            mempages_0 = vconfig.LibvirtConfigCapsNUMAPages()
            mempages_0.size = 4
            mempages_0.total = 1024 * cellid

            mempages_1 = vconfig.LibvirtConfigCapsNUMAPages()
            mempages_1.size = 2048
            mempages_1.total = 0 + cellid

            cell_mempages.append([mempages_0, mempages_1])

        topology = fakelibvirt.HostInfo._gen_numa_topology(
            cells_per_host,
            sockets_per_cell,
            cores_per_socket,
            threads_per_core,
            kb_mem=kb_mem,
            numa_mempages_list=cell_mempages)

        return topology

    def _test_get_host_numa_topology(self, mempages):
        # Shared body for the NUMA topology tests; `mempages` toggles
        # whether hugepage info is expected in the result.
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = arch.X86_64
        caps.host.topology = self._fake_caps_numa_topology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        expected_topo_dict = {'cells': [
            {'cpus': '0,1', 'cpu_usage': 0,
             'mem': {'total': 256, 'used': 0},
             'id': 0},
            {'cpus': '3', 'cpu_usage': 0,
             'mem': {'total': 256, 'used': 0},
             'id': 1},
            {'cpus': '', 'cpu_usage': 0,
             'mem': {'total': 256, 'used': 0},
             'id':
                2},
            {'cpus': '', 'cpu_usage': 0,
             'mem': {'total': 256, 'used': 0},
             'id': 3}]}
        with test.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([0, 1, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set([0, 1, 2, 3, 6])),
                ):
            got_topo = drvr._get_host_numa_topology()
            got_topo_dict = got_topo._to_dict()

            self.assertThat(
                expected_topo_dict, matchers.DictMatches(got_topo_dict))

            if mempages:
                # cells 0
                self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
                self.assertEqual(0, got_topo.cells[0].mempages[0].total)
                self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
                self.assertEqual(0, got_topo.cells[0].mempages[1].total)
                # cells 1
                self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
                self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
                self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
                self.assertEqual(1, got_topo.cells[1].mempages[1].total)
            else:
                self.assertEqual([], got_topo.cells[0].mempages)
                self.assertEqual([], got_topo.cells[1].mempages)

            self.assertEqual(expected_topo_dict, got_topo_dict)
            self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
            self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
            self.assertEqual([], got_topo.cells[1].siblings)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_get_host_numa_topology(self, mock_version):
        self._test_get_host_numa_topology(mempages=True)

    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_host_numa_topology_no_mempages(self, mock_lib_version,
                                                mock_version, mock_type):
        # Below the hugepage-capable libvirt version, no mempages data
        # should be reported.
        self.flags(virt_type='kvm', group='libvirt')
        mock_lib_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1
        mock_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
        mock_type.return_value = host.HV_DRIVER_QEMU
        self._test_get_host_numa_topology(mempages=False)

    def test_get_host_numa_topology_empty(self):
        # No host topology in the capabilities -> None is returned.
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = arch.X86_64
        caps.host.topology = None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps)
        ) as (has_min_version, get_caps):
            self.assertIsNone(drvr._get_host_numa_topology())
        self.assertEqual(2, get_caps.call_count)

    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_host_numa_topology_old_version(self, mock_lib_version,
                                                mock_version, mock_type):
        # libvirt older than MIN_LIBVIRT_NUMA_VERSION -> no topology.
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        mock_lib_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1
        mock_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
        mock_type.return_value = host.HV_DRIVER_QEMU
        self.assertIsNone(drvr._get_host_numa_topology())

    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_host_numa_topology_xen(self, mock_lib_version,
                                        mock_version, mock_type):
        # Xen hypervisor driver -> NUMA topology is not supported.
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        mock_lib_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
        mock_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
        mock_type.return_value = host.HV_DRIVER_XEN
        self.assertIsNone(drvr._get_host_numa_topology())

    def test_diagnostic_vcpus_exception(self):
        # If vcpus() raises, diagnostics should omit per-CPU data but
        # still include disk, NIC and memory stats.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                raise fakelibvirt.libvirtError('vcpus missing')

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10 seconds after launch so uptime is deterministic.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))

        instance.launched_at = lt
        actual =
        drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())

    def test_diagnostic_blockstats_exception(self):
        # If blockStats() raises, diagnostics should omit disk stats
        # but still include CPU, NIC and memory stats.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                raise fakelibvirt.libvirtError('blockStats missing')

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time':
                      1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10 seconds after launch so uptime is deterministic.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))

        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())

    def test_diagnostic_interfacestats_exception(self):
        # If interfaceStats() raises, diagnostics should omit NIC stats
        # but still include CPU, disk and memory stats.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                raise fakelibvirt.libvirtError('interfaceStat missing')

            def memoryStats(self):
                return
                {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10 seconds after launch so uptime is deterministic.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))

        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())

    def test_diagnostic_memorystats_exception(self):
        # If memoryStats() raises, diagnostics should omit the actual/rss
        # memory figures but still include everything else.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                raise fakelibvirt.libvirtError('memoryStats missing')

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10 seconds after launch so uptime is deterministic.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))

        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())

    def test_diagnostic_full(self):
        # Happy path: every libvirt stats call succeeds, so diagnostics
        # include CPU, disk, NIC and memory data.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10 seconds after launch so uptime is deterministic.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))

        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())

    @mock.patch.object(host.Host, 'get_domain')
    def test_diagnostic_full_with_multiple_interfaces(self,
                                                      mock_get_domain):
        # Same as test_diagnostic_full but with a second (bridge)
        # interface; both must appear in the diagnostics.
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                        <interface type="bridge">
                            <mac address="53:55:00:a5:39:39"/>
                            <model type="virtio"/>
                            <target dev="br0"/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self):
            return DiagFakeDomain()

        mock_get_domain.side_effect = fake_get_domain

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  'br0_rx': 4408,
                  'br0_rx_drop': 0,
                  'br0_rx_errors': 0,
                  'br0_rx_packets': 82,
                  'br0_tx': 0,
                  'br0_tx_drop': 0,
                  'br0_tx_errors': 0,
                  'br0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10 seconds after launch so uptime is deterministic.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))

        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0},
                                    {'mac_address': '53:55:00:a5:39:39',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10.,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_failing_vcpu_count(self, mock_list):
        """Domain can fail to return the vcpu description in case it's
        just starting up or shutting down. Make sure None is handled
        gracefully.
        """

        class DiagFakeDomain(object):
            def __init__(self, vcpus):
                self._vcpus = vcpus

            def vcpus(self):
                if self._vcpus is None:
                    raise fakelibvirt.libvirtError("fake-error")
                else:
                    return ([[1, 2, 3, 4]] * self._vcpus,
                            [True] * self._vcpus)

            def ID(self):
                return 1

            def name(self):
                return "instance000001"

            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"

        mock_list.return_value = [
            DiagFakeDomain(None), DiagFakeDomain(5)]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # The failing domain contributes nothing; only the 5 vcpus of
        # the healthy domain are counted.
        self.assertEqual(5, drvr._get_vcpu_used())
        mock_list.assert_called_with(only_guests=True, only_running=True)

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_failing_vcpu_count_none(self, mock_list):
        """Domain will return zero if the current number of vcpus used
        is None. This is in case of VM state starting up or shutting
        down. None type returned is counted as zero.
""" class DiagFakeDomain(object): def __init__(self): pass def vcpus(self): return None def ID(self): return 1 def name(self): return "instance000001" mock_list.return_value = [DiagFakeDomain()] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_vcpu_used()) mock_list.assert_called_with(only_guests=True, only_running=True) def test_get_instance_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): caps = vconfig.LibvirtConfigCaps() guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.X86_64 guest.domtype = ['kvm', 'qemu'] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.I686 guest.domtype = ['kvm'] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = [(arch.X86_64, 'kvm', 'hvm'), (arch.X86_64, 'qemu', 'hvm'), (arch.I686, 'kvm', 'hvm')] got = drvr._get_instance_capabilities() self.assertEqual(want, got) def test_set_cache_mode(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'directsync') def test_set_cache_mode_invalid_mode(self): self.flags(disk_cachemodes=['file=FAKE'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertIsNone(fake_conf.driver_cache) def test_set_cache_mode_invalid_object(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuest() fake_conf.driver_cache = 'fake' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'fake') 
    @mock.patch('os.unlink')
    @mock.patch.object(os.path, 'exists')
    def _test_shared_storage_detection(self, is_same,
                                       mock_exists, mock_unlink):
        # Helper: drive _is_storage_shared_with() through its tempfile
        # round-trip.  `is_same` controls whether the remotely-created
        # file appears locally (shared storage) or not.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
        mock_exists.return_value = is_same
        with test.nested(
                mock.patch.object(drvr._remotefs, 'create_file'),
                mock.patch.object(drvr._remotefs, 'remove_file')
        ) as (mock_rem_fs_create, mock_rem_fs_remove):
            result = drvr._is_storage_shared_with('host', '/path')
        mock_rem_fs_create.assert_any_call('host', mock.ANY)
        create_args, create_kwargs = mock_rem_fs_create.call_args
        self.assertTrue(create_args[1].startswith('/path'))
        if is_same:
            # Shared: the probe file shows up locally and is unlinked here.
            mock_unlink.assert_called_once_with(mock.ANY)
        else:
            # Not shared: the probe file must be removed on the remote host.
            mock_rem_fs_remove.assert_called_with('host', mock.ANY)
            remove_args, remove_kwargs = mock_rem_fs_remove.call_args
            self.assertTrue(remove_args[1].startswith('/path'))
        return result

    def test_shared_storage_detection_same_host(self):
        self.assertTrue(self._test_shared_storage_detection(True))

    def test_shared_storage_detection_different_host(self):
        self.assertFalse(self._test_shared_storage_detection(False))

    def test_shared_storage_detection_easy(self):
        # When the destination host IP matches our own, storage is shared
        # by definition -- no file probe should be needed.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        drvr.get_host_ip_addr().AndReturn('foo')
        self.mox.ReplayAll()
        self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))

    def test_store_pid_remove_pid(self):
        # job_tracker add/remove round-trip keyed on the instance uuid.
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        popen = mock.Mock(pid=3)
        drvr.job_tracker.add_job(instance, popen.pid)
        self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
        drvr.job_tracker.remove_job(instance, popen.pid)
        self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_get_domain_info_with_more_return(self, mock_get_domain):
        # Maps the 6-element libvirt dom.info() list onto InstanceInfo
        # fields plus the domain ID.
        instance = objects.Instance(**self.test_instance)
        dom_mock = mock.MagicMock()
        dom_mock.info.return_value = [
            1, 2048, 737, 8, 12345, 888888
        ]
        dom_mock.ID.return_value = mock.sentinel.instance_id
        mock_get_domain.return_value = dom_mock
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_info(instance)
        self.assertEqual(1, info.state)
        self.assertEqual(2048, info.max_mem_kb)
        self.assertEqual(737, info.mem_kb)
        self.assertEqual(8, info.num_cpu)
        self.assertEqual(12345, info.cpu_time_ns)
        self.assertEqual(mock.sentinel.instance_id, info.id)
        dom_mock.info.assert_called_once_with()
        dom_mock.ID.assert_called_once_with()
        mock_get_domain.assert_called_once_with(instance)

    def test_create_domain(self):
        # _create_domain wraps the libvirt domain in a Guest and launches
        # it via createWithFlags(0).
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_domain = mock.MagicMock()

        guest = drvr._create_domain(domain=mock_domain)

        self.assertEqual(mock_domain, guest._domain)
        mock_domain.createWithFlags.assert_has_calls([mock.call(0)])

    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                               mock_setup_container, mock_get_info,
                               mock_clean):
        # LXC path of _create_domain_and_network: the container rootfs is
        # set up via setup_container and, for a RUNNING guest, the LXC
        # namespace is cleaned afterwards.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        # The nbd device handed back by setup_container is recorded in the
        # instance system metadata.
        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])

    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                       mock_ensure_tree,
                                       mock_setup_container,
                                       mock_chown, mock_get_info,
                                       mock_clean):
        # Same LXC path, but with uid/gid maps configured: the rootfs must
        # be chowned with LibvirtConfigGuestUIDMap/GIDMap objects matching
        # the "0:1000:100" flag values.
        self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
                   gid_maps=["0:1000:100"], group='libvirt')

        def chown_side_effect(path, id_maps):
            # Assert the exact id-map translation inside the mocked call.
            self.assertEqual('/tmp/rootfs', path)
            self.assertIsInstance(id_maps[0],
                                  vconfig.LibvirtConfigGuestUIDMap)
            self.assertEqual(0, id_maps[0].start)
            self.assertEqual(1000, id_maps[0].target)
            self.assertEqual(100, id_maps[0].count)
            self.assertIsInstance(id_maps[1],
                                  vconfig.LibvirtConfigGuestGIDMap)
            self.assertEqual(0, id_maps[1].start)
            self.assertEqual(1000, id_maps[1].target)
            self.assertEqual(100, id_maps[1].count)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_chown.side_effect = chown_side_effect
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
        ) as (
            mock_is_booted_from_volume, mock_create_domain, mock_plug_vifs,
            mock_setup_basic_filtering, mock_prepare_instance_filter,
            mock_apply_instance_filter
        ):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_is_booted_from_volume.assert_called_once_with(mock_instance, {})
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])

    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                           mock_ensure_tree,
                                           mock_setup_container,
                                           mock_get_info, mock_teardown):
        # LXC path where the guest ends up SHUTDOWN: the container must be
        # torn down instead of having its namespace cleaned.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        teardown_call = mock.call(container_dir='/tmp/rootfs')
        mock_teardown.assert_has_calls([teardown_call])
    def test_create_domain_define_xml_fails(self):
        """Tests that the xml is logged when defining the domain fails."""
        fake_xml = "<test>this is a test</test>"

        def fake_defineXML(xml):
            # The exact XML we passed in must reach libvirt before the
            # simulated failure.
            self.assertEqual(fake_xml, xml)
            raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

        def fake_safe_decode(text, *args, **kwargs):
            return text + 'safe decoded'

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)
            # Proves the logged XML went through encodeutils.safe_decode.
            self.assertIn('safe decoded', msg % args)

        self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock(defineXML=fake_defineXML)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          fake_xml)
        self.assertTrue(self.log_error_called)

    def test_create_domain_with_flags_fails(self):
        """Tests that the xml is logged when creating the domain with flags
        fails
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_createWithFlags(launch_flags):
            raise fakelibvirt.libvirtError('virDomainCreateWithFlags() '
                                           'failed')

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          domain=fake_domain)
        self.assertTrue(self.log_error_called)

    def test_create_domain_enable_hairpin_fails(self):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        # Guest.enable_hairpin fails when the underlying 'tee' execute
        # call blows up; the domain XML must be logged.
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError('error')

        def fake_get_interfaces(*args):
            return ["dev"]

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(nova.utils, 'execute', fake_execute)
        self.stubs.Set(
            nova.virt.libvirt.guest.Guest, 'get_interfaces',
            fake_get_interfaces)

        self.assertRaises(processutils.ProcessExecutionError,
                          drvr._create_domain,
                          domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)

    def test_get_vnc_console(self):
        # The VNC port is parsed out of the domain's <graphics type='vnc'>
        # element.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='vnc' port='5900'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        vnc_dict = drvr.get_vnc_console(self.context, instance)
        self.assertEqual(vnc_dict.port, '5900')

    def test_get_vnc_console_unavailable(self):
        # No <graphics> element at all -> ConsoleTypeUnavailable.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_vnc_console, self.context, instance)

    def test_get_spice_console(self):
        # The SPICE port is parsed out of <graphics type='spice'>.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='spice' port='5950'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        spice_dict = drvr.get_spice_console(self.context, instance)
        self.assertEqual(spice_dict.port, '5950')

    def test_get_spice_console_unavailable(self):
        # No <graphics> element -> ConsoleTypeUnavailable for spice too.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_spice_console, self.context, instance)

    def test_detach_volume_with_instance_not_found(self):
        # Test that detach_volume() method does not raise exception,
        # if the instance does not exist.

        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
            mock.patch.object(host.Host, 'get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.uuid)),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (_get_domain, _disconnect_volume):
            connection_info = {'driver_volume_type': 'fake'}
            # Must not raise even though the domain lookup fails; the
            # backend volume is still disconnected.
            drvr.detach_volume(connection_info, instance, '/dev/sda')
            _get_domain.assert_called_once_with(instance)
            _disconnect_volume.assert_called_once_with(connection_info,
                                                       'sda')

    def _test_attach_detach_interface_get_config(self, method_name):
        """Tests that the get_config() method is properly called in
        attach_interface() and detach_interface().

        method_name: either \"attach_interface\" or \"detach_interface\"
                     depending on the method to test.
        """
        self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance['image_ref']})

        if method_name == "attach_interface":
            self.mox.StubOutWithMock(drvr.firewall_driver,
                                     'setup_basic_filtering')
            drvr.firewall_driver.setup_basic_filtering(instance, network_info)

        # Capture the config the real vif driver would produce, then
        # expect get_config to be called with equivalent arguments.
        expected = drvr.vif_driver.get_config(instance, network_info[0],
                                              fake_image_meta,
                                              instance.get_flavor(),
                                              CONF.libvirt.virt_type,
                                              drvr._host)
        self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
        drvr.vif_driver.get_config(instance, network_info[0],
                                   mox.IsA(objects.ImageMeta),
                                   mox.IsA(objects.Flavor),
                                   CONF.libvirt.virt_type,
                                   drvr._host).\
                                   AndReturn(expected)

        self.mox.ReplayAll()

        if method_name == "attach_interface":
            drvr.attach_interface(instance, fake_image_meta,
                                  network_info[0])
        elif method_name == "detach_interface":
            drvr.detach_interface(instance, network_info[0])
        else:
            raise ValueError("Unhandled method %s" % method_name)

    @mock.patch.object(lockutils, "external_lock")
    def test_attach_interface_get_config(self, mock_lock):
        """Tests that the get_config() method is properly called in
        attach_interface().
        """
        mock_lock.return_value = threading.Semaphore()

        self._test_attach_detach_interface_get_config("attach_interface")

    def test_detach_interface_get_config(self):
        """Tests that the get_config() method is properly called in
        detach_interface().
        """
        self._test_attach_detach_interface_get_config("detach_interface")

    def test_default_root_device_name(self):
        # Root disk bus is resolved (virtio for disk, ide for cdrom) and
        # fed into get_root_info to pick the default root device name.
        instance = {'uuid': 'fake_instance'}
        image_meta = objects.ImageMeta.from_dict({'id': uuids.image_id})
        root_bdm = {'source_type': 'image',
                    'destination_type': 'volume',
                    'image_id': 'fake_id'}
        self.flags(virt_type='qemu', group='libvirt')

        self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
        self.mox.StubOutWithMock(blockinfo, 'get_root_info')

        blockinfo.get_disk_bus_for_device_type(instance,
                                               'qemu',
                                               image_meta,
                                               'disk').InAnyOrder().\
                                                AndReturn('virtio')
        blockinfo.get_disk_bus_for_device_type(instance,
                                               'qemu',
                                               image_meta,
                                               'cdrom').InAnyOrder().\
                                                AndReturn('ide')
        blockinfo.get_root_info(instance, 'qemu',
                                image_meta, root_bdm,
                                'virtio', 'ide').AndReturn({'dev': 'vda'})
        self.mox.ReplayAll()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(drvr.default_root_device_name(instance, image_meta,
                                                       root_bdm), '/dev/vda')

    @mock.patch.object(objects.BlockDeviceMapping, "save")
    def test_default_device_names_for_instance(self, save_mock):
        instance = objects.Instance(**self.test_instance)
        instance.root_device_name = '/dev/vda'
        ephemerals = [objects.BlockDeviceMapping(
                        **fake_block_device.AnonFakeDbBlockDeviceDict(
                            {'device_name': 'vdb',
                             'source_type': 'blank',
                             'volume_size': 2,
                             'destination_type': 'local'}))]
        swap = [objects.BlockDeviceMapping(
                        **fake_block_device.AnonFakeDbBlockDeviceDict(
                            {'device_name': 'vdg',
                             'source_type': 'blank',
                             'volume_size': 512,
                             'guest_format': 'swap',
                             'destination_type': 'local'}))]
        block_device_mapping = [
            objects.BlockDeviceMapping(
                **fake_block_device.AnonFakeDbBlockDeviceDict(
                    {'source_type': 'volume',
                     'destination_type': 'volume',
                     'volume_id': 'fake-image-id',
                     'device_name': '/dev/vdxx',
                     'disk_bus': 'scsi'}))]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.default_device_names_for_instance(instance,
                                               instance.root_device_name,
                                               ephemerals, swap,
                                               block_device_mapping)

        # Ephemeral device name was correct so no changes
        self.assertEqual('/dev/vdb', ephemerals[0].device_name)
        # Swap device name was incorrect so it was changed
        self.assertEqual('/dev/vdc', swap[0].device_name)
        # Volume device name was changed too, taking the bus into account
        self.assertEqual('/dev/sda', block_device_mapping[0].device_name)

        self.assertEqual(3, save_mock.call_count)

    def _test_get_device_name_for_instance(self, new_bdm, expected_dev):
        # Helper: ask the driver for the next free device name given a
        # candidate BDM and assert the chosen name.
        instance = objects.Instance(**self.test_instance)
        instance.root_device_name = '/dev/vda'
        instance.ephemeral_gb = 0
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        got_dev = drvr.get_device_name_for_instance(
            instance, [], new_bdm)
        self.assertEqual(expected_dev, got_dev)

    def test_get_device_name_for_instance_simple(self):
        new_bdm = objects.BlockDeviceMapping(
            context=context, source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id', device_name=None,
            guest_format=None, disk_bus=None, device_type=None)
        self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')

    def test_get_device_name_for_instance_suggested(self):
        # A caller-suggested name ('/dev/vdg') is ignored in favour of the
        # next free slot.
        new_bdm = objects.BlockDeviceMapping(
            context=context, source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id', device_name='/dev/vdg',
            guest_format=None, disk_bus=None, device_type=None)
        self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')

    def test_get_device_name_for_instance_bus(self):
        # A scsi disk_bus yields an 'sd' prefix.
        new_bdm = objects.BlockDeviceMapping(
            context=context, source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id', device_name=None,
            guest_format=None, disk_bus='scsi', device_type=None)
        self._test_get_device_name_for_instance(new_bdm, '/dev/sda')

    def test_get_device_name_for_instance_device_type(self):
        # A floppy device_type yields an 'fd' prefix.
        new_bdm = objects.BlockDeviceMapping(
            context=context, source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id', device_name=None,
            guest_format=None, disk_bus=None, device_type='floppy')
        self._test_get_device_name_for_instance(new_bdm, '/dev/fda')

    def test_is_supported_fs_format(self):
        # ext2/3/4 and xfs are supported; empty and unknown names are not.
        supported_fs = [disk_api.FS_FORMAT_EXT2, disk_api.FS_FORMAT_EXT3,
                        disk_api.FS_FORMAT_EXT4, disk_api.FS_FORMAT_XFS]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        for fs in supported_fs:
            self.assertTrue(drvr.is_supported_fs_format(fs))

        supported_fs = ['', 'dummy']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        for fs in supported_fs:
            self.assertFalse(drvr.is_supported_fs_format(fs))

    def test_post_live_migration_at_destination_with_block_device_info(self):
        # Preparing mocks
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        self.resultXML = None

        def fake_getLibVersion():
            return fakelibvirt.FAKE_LIBVIRT_VERSION

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            if image_meta is None:
                image_meta = objects.ImageMeta.from_dict({})
            conf = drvr._get_guest_config(instance, network_info, image_meta,
                                          disk_info, rescue,
                                          block_device_info)
            self.resultXML = conf.to_xml()
            return self.resultXML

        def fake_get_domain(instance):
            return mock_domain

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Westmere</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='aes'/>
                      </cpu>
                   """

        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      listDefinedDomains=lambda: [],
                                      numOfDomains=lambda: 0,
                                      baselineCPU=fake_baselineCPU)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)

        self.mox.ReplayAll()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr,
                       '_get_guest_xml',
                       fake_to_xml)
        self.stubs.Set(host.Host,
                       'get_domain',
                       fake_get_domain)
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1, 'guest_format': None,
                 'boot_index': 0,
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'device_name': '/dev/vda',
                 'disk_bus': 'virtio',
                 'device_type': 'disk',
                 'delete_on_termination': False}))
        block_device_info = {'block_device_mapping':
                driver_block_device.convert_volumes([bdm])}
        block_device_info['block_device_mapping'][0]['connection_info'] = (
            {'driver_volume_type': 'iscsi'})
        with test.nested(
                mock.patch.object(
                    driver_block_device.DriverVolumeBlockDevice, 'save'),
                mock.patch.object(objects.Instance, 'save')
        ) as (mock_volume_save, mock_instance_save):
            drvr.post_live_migration_at_destination(
                    self.context, instance, network_info, True,
                    block_device_info=block_device_info)
            # The regenerated guest XML must include the fake volume data
            # and the volume BDM must be persisted exactly once.
            self.assertIn('fake', self.resultXML)
            mock_volume_save.assert_called_once_with()

    def test_create_propagates_exceptions(self):
        # Failures inside _create_domain must bubble out of
        # _create_domain_and_network, not be swallowed by cleanup.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(id=1, uuid=uuids.instance,
                                    image_ref='my_fake_image')

        with test.nested(
              mock.patch.object(drvr, '_create_domain_setup_lxc'),
              mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
              mock.patch.object(drvr, '_is_booted_from_volume',
                                return_value=False),
              mock.patch.object(drvr, 'plug_vifs'),
              mock.patch.object(drvr, 'firewall_driver'),
              mock.patch.object(drvr, '_create_domain',
                                side_effect=exception.NovaException),
              mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(exception.NovaException,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              instance, None, None)

    def test_create_without_pause(self):
        self.flags(virt_type='lxc', group='libvirt')

        @contextlib.contextmanager
        def fake_lxc_disk_handler(*args, **kwargs):
            yield

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        with test.nested(
              mock.patch.object(drvr, '_lxc_disk_handler',
                                side_effect=fake_lxc_disk_handler),
              mock.patch.object(drvr, 'plug_vifs'),
              mock.patch.object(drvr, 'firewall_driver'),
              mock.patch.object(drvr, '_create_domain'),
              mock.patch.object(drvr, 'cleanup')) as (
              _handler, cleanup, firewall_driver, create, plug_vifs):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, None, None)
            # With no vifs to wait on, the domain is created unpaused and
            # never resumed.
            self.assertEqual(0, create.call_args_list[0][1]['pause'])
            self.assertEqual(0, domain.resume.call_count)

    def _test_create_with_network_events(self, neutron_failure=None,
                                         power_on=True):
        # Helper driving _create_domain_and_network through the
        # network-vif-plugged event machinery; `neutron_failure` selects
        # success, a timed-out wait, or a 'failed' event status.
        generated_events = []

        def wait_timeout():
            event = mock.MagicMock()
            if neutron_failure == 'timeout':
                raise eventlet.timeout.Timeout()
            elif neutron_failure == 'error':
                event.status = 'failed'
            else:
                event.status = 'completed'
            return event

        def fake_prepare(instance, event_name):
            m = mock.MagicMock()
            m.instance = instance
            m.event_name = event_name
            m.wait.side_effect = wait_timeout
            generated_events.append(m)
            return m

        virtapi = manager.ComputeVirtAPI(mock.MagicMock())
        prepare = virtapi._compute.instance_events.prepare_for_instance_event
        prepare.side_effect = fake_prepare
        drvr = libvirt_driver.LibvirtDriver(virtapi, False)

        instance = objects.Instance(**self.test_instance)
        vifs = [{'id': 'vif1', 'active': False},
                {'id': 'vif2', 'active': False}]

        @mock.patch.object(drvr, 'plug_vifs')
        @mock.patch.object(drvr, 'firewall_driver')
        @mock.patch.object(drvr, '_create_domain')
        @mock.patch.object(drvr, 'cleanup')
        def test_create(cleanup, create, fw_driver, plug_vifs):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, vifs, None,
                                                     power_on=power_on)
            plug_vifs.assert_called_with(instance, vifs)

            pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
            self.assertEqual(pause,
                             create.call_args_list[0][1]['pause'])
            if pause:
                domain.resume.assert_called_once_with()
            if neutron_failure and CONF.vif_plugging_is_fatal:
                cleanup.assert_called_once_with(self.context,
                                                instance, network_info=vifs,
                                                block_device_info=None)

        test_create()

        if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
            # One event per inactive vif is prepared up front.
            prepare.assert_has_calls([
                mock.call(instance, 'network-vif-plugged-vif1'),
                mock.call(instance, 'network-vif-plugged-vif2')])
            for event in generated_events:
                if neutron_failure and generated_events.index(event) != 0:
                    self.assertEqual(0, event.call_count)
                elif (neutron_failure == 'error' and
                          not CONF.vif_plugging_is_fatal):
                    event.wait.assert_called_once_with()
        else:
            self.assertEqual(0, prepare.call_count)

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron(self, is_neutron):
        self._test_create_with_network_events()

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_power_off(self,
                                                          is_neutron):
        # Tests that we don't wait for events if we don't start the instance.
        self._test_create_with_network_events(power_on=False)

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_nowait(self, is_neutron):
        self.flags(vif_plugging_timeout=0)
        self._test_create_with_network_events()

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_timeout(
            self, is_neutron):
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='timeout')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_timeout(
            self, is_neutron):
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='timeout')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_error(
            self, is_neutron):
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='error')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_error(
            self, is_neutron):
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='error')

    @mock.patch('nova.utils.is_neutron', return_value=False)
    def test_create_with_network_events_non_neutron(self, is_neutron):
        self._test_create_with_network_events()

    @mock.patch('nova.volume.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_create_with_bdm(self, get_info_from_bdm,
                             get_encryption_metadata):
        # Boot with an attached encrypted volume: the encryptor must be
        # fetched with the volume's connection_info and the domain built
        # with the expected pause flag.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_dom = mock.MagicMock()
        mock_encryption_meta = mock.MagicMock()
        get_encryption_metadata.return_value = mock_encryption_meta

        fake_xml = """
            <domain>
                <name>instance-00000001</name>
                <memory>1048576</memory>
                <vcpu>1</vcpu>
                <devices>
                    <disk type='file' device='disk'>
                        <driver name='qemu' type='raw' cache='none'/>
                        <source file='/path/fake-volume1'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                </devices>
            </domain>
        """
        fake_volume_id = "fake-volume-id"
        connection_info = {"driver_volume_type": "fake",
                           "data": {"access_mode": "rw",
                                    "volume_id": fake_volume_id}}

        def fake_getitem(*args, **kwargs):
            fake_bdm = {'connection_info': connection_info,
                        'mount_device': '/dev/vda'}
            return fake_bdm.get(args[0])

        mock_volume = mock.MagicMock()
        mock_volume.__getitem__.side_effect = fake_getitem
        block_device_info = {'block_device_mapping': [mock_volume]}
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]

        with test.nested(
            mock.patch.object(drvr, '_get_volume_encryptor'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
        ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
              prepare_instance_filter, create_domain, apply_instance_filter):
            create_domain.return_value = libvirt_guest.Guest(mock_dom)

            guest = drvr._create_domain_and_network(
                    self.context, fake_xml, instance, network_info, None,
                    block_device_info=block_device_info)

            get_encryption_metadata.assert_called_once_with(self.context,
                drvr._volume_api, fake_volume_id, connection_info)
            get_volume_encryptor.assert_called_once_with(connection_info,
                                                         mock_encryption_meta)
            plug_vifs.assert_called_once_with(instance, network_info)
            setup_basic_filtering.assert_called_once_with(instance,
                                                          network_info)
            prepare_instance_filter.assert_called_once_with(instance,
                                                            network_info)
            pause = self._get_pause_flag(drvr, network_info)
            create_domain.assert_called_once_with(
                fake_xml, pause=pause, power_on=True, post_xml_callback=None)
            self.assertEqual(mock_dom, guest._domain)

    def test_get_guest_storage_config(self):
        # Builds guest disk configs for root/ephemeral plus an attached
        # volume; default ephemeral/swap device names are recorded on the
        # instance and cache mode is set on every device.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["default_swap_device"] = None
        instance = objects.Instance(**test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = instance.get_flavor()
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict({
                   'id': 1,
                   'source_type': 'volume',
                   'destination_type': 'volume',
                   'device_name': '/dev/vdc'}))
        bdi = {'block_device_mapping':
               driver_block_device.convert_volumes([bdm])}
        bdm = bdi['block_device_mapping'][0]
        bdm['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            bdi)
        mock_conf = mock.MagicMock(source_path='fake')

        with test.nested(
            mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                              'save'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode')
        ) as (volume_save, connect_volume, get_volume_config,
              set_cache_mode):
            devices = drvr._get_guest_storage_config(instance, image_meta,
                disk_info, False, bdi, flavor, "hvm")

            self.assertEqual(3, len(devices))
            self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
            self.assertIsNone(instance.default_swap_device)
            connect_volume.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            get_volume_config.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            volume_save.assert_called_once_with()
            self.assertEqual(3, set_cache_mode.call_count)

    def test_get_neutron_events(self):
        # Only inactive vifs generate network-vif-plugged events to wait on.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]
        events = drvr._get_neutron_events(network_info)
        self.assertEqual([('network-vif-plugged', '1')], events)

    def test_unplug_vifs_ignores_errors(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            # ignore_errors=True: the AgentError is swallowed.
            drvr._unplug_vifs('inst', [1], ignore_errors=True)
            vif_driver.unplug.assert_called_once_with('inst', 1)

    def test_unplug_vifs_reports_errors(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            # Public unplug_vifs propagates the driver error.
            self.assertRaises(exception.AgentError,
                              drvr.unplug_vifs,
                              'inst', [1])
            vif_driver.unplug.assert_called_once_with('inst', 1)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
        # A BDM with no mount_device must still be disconnected.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = mock.Mock()
        drvr._disconnect_volume = mock.Mock()
        fake_inst = {'name': 'foo'}
        fake_bdms = [{'connection_info': 'foo',
                     'mount_device': None}]
        with mock.patch('nova.virt.driver'
                        '.block_device_info_get_mapping',
                        return_value=fake_bdms):
            drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
        self.assertTrue(drvr._disconnect_volume.called)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        fake_inst = {'name': 'foo'}
        with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
            lookup.return_value = fake_inst
            # NOTE(danms): Make unplug cause us to bail early, since
            # we only care about how it was called
            unplug.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
            unplug.assert_called_once_with(fake_inst, 'netinfo', True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
                       return_value=True)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_migrate_data_shared_block_storage(self,
                                                       _undefine_domain,
                                                       save,
                                                       delete_instance_files,
                                                       unfilter_instance):
        # Tests the cleanup method when migrate_data has
        # is_shared_block_storage=True and destroy_disks=False.
        # Instance files are still deleted and the instance is marked
        # cleaned and saved.
        instance = objects.Instance(self.context, **self.test_instance)
        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_block_storage=True)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.cleanup(
            self.context, instance, network_info={}, destroy_disks=False,
            migrate_data=migrate_data, destroy_vifs=False)
        delete_instance_files.assert_called_once_with(instance)
        self.assertEqual(1, int(instance.system_metadata['clean_attempts']))
        self.assertTrue(instance.cleaned)
        save.assert_called_once_with()

    def test_swap_volume(self):
        # Verify _swap_volume drives the libvirt block-rebase copy job:
        # dump XML, rebase with COPY|REUSE_EXT, resize to the new size
        # (GiB argument converted to KiB) and redefine the domain.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)

        with mock.patch.object(drvr._conn, 'defineXML',
                               create=True) as mock_define:
            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            # Report the block job as already complete (cur == end) so the
            # driver's wait loop finishes immediately.
            mock_dom.blockJobInfo.return_value = {
                'type': 0,
                'bandwidth': 0,
                'cur': 100,
                'end': 100
            }

            drvr._swap_volume(guest, srcfile, dstfile, 1)

            mock_dom.XMLDesc.assert_called_once_with(
                flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                       fakelibvirt.VIR_DOMAIN_XML_SECURE))
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dstfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
            mock_dom.blockResize.assert_called_once_with(
                srcfile, 1 * units.Gi / units.Ki)
            mock_define.assert_called_once_with(xmldoc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume') @mock.patch('nova.objects.block_device.BlockDeviceMapping.' 'get_by_volume_and_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume') @mock.patch('nova.virt.libvirt.host.Host.get_guest') def _test_swap_volume_driver_bdm_save(self, get_guest, connect_volume, get_volume_config, get_by_volume_and_instance, swap_volume, disconnect_volume, volume_save, source_type): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) old_connection_info = {'driver_volume_type': 'fake', 'serial': 'old-volume-id', 'data': {'device_path': '/fake-old-volume', 'access_mode': 'rw'}} new_connection_info = {'driver_volume_type': 'fake', 'serial': 'new-volume-id', 'data': {'device_path': '/fake-new-volume', 'access_mode': 'rw'}} mock_dom = mock.MagicMock() guest = libvirt_guest.Guest(mock_dom) mock_dom.XMLDesc.return_value = """<domain> <devices> <disk type='file'> <source file='/fake-old-volume'/> <target dev='vdb' bus='virtio'/> </disk> </devices> </domain> """ mock_dom.name.return_value = 'inst' mock_dom.UUIDString.return_value = 'uuid' get_guest.return_value = guest disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'} get_volume_config.return_value = mock.MagicMock( source_path='/fake-new-volume') bdm = objects.BlockDeviceMapping(self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'instance_uuid': uuids.instance, 'device_name': '/dev/vdb', 'source_type': source_type, 'destination_type': 'volume', 'volume_id': 'fake-volume-id-2', 'boot_index': 0})) get_by_volume_and_instance.return_value = bdm conn.swap_volume(old_connection_info, new_connection_info, instance, '/dev/vdb', 1) get_guest.assert_called_once_with(instance) connect_volume.assert_called_once_with(new_connection_info, 
disk_info) swap_volume.assert_called_once_with(guest, 'vdb', '/fake-new-volume', 1) disconnect_volume.assert_called_once_with(old_connection_info, 'vdb') volume_save.assert_called_once_with() @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save') def test_swap_volume_driver_bdm_save_source_is_volume(self, volume_save): self._test_swap_volume_driver_bdm_save(volume_save=volume_save, source_type='volume') @mock.patch('nova.virt.block_device.DriverImageBlockDevice.save') def test_swap_volume_driver_bdm_save_source_is_image(self, volume_save): self._test_swap_volume_driver_bdm_save(volume_save=volume_save, source_type='image') @mock.patch('nova.virt.block_device.DriverSnapshotBlockDevice.save') def test_swap_volume_driver_bdm_save_source_is_snapshot(self, volume_save): self._test_swap_volume_driver_bdm_save(volume_save=volume_save, source_type='snapshot') def _test_live_snapshot(self, can_quiesce=False, require_quiesce=False): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) mock_dom = mock.MagicMock() test_image_meta = self.test_image_meta.copy() if require_quiesce: test_image_meta = {'properties': {'os_require_quiesce': 'yes'}} with test.nested( mock.patch.object(drvr._conn, 'defineXML', create=True), mock.patch.object(fake_libvirt_utils, 'get_disk_size'), mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'), mock.patch.object(fake_libvirt_utils, 'create_cow_image'), mock.patch.object(fake_libvirt_utils, 'chown'), mock.patch.object(fake_libvirt_utils, 'extract_snapshot'), mock.patch.object(drvr, '_set_quiesced') ) as (mock_define, mock_size, mock_backing, mock_create_cow, mock_chown, mock_snapshot, mock_quiesce): xmldoc = "<domain/>" srcfile = "/first/path" dstfile = "/second/path" bckfile = "/other/path" dltfile = dstfile + ".delta" mock_dom.XMLDesc.return_value = xmldoc mock_dom.isPersistent.return_value = True mock_size.return_value = 1004009 mock_backing.return_value = bckfile guest = libvirt_guest.Guest(mock_dom) if not can_quiesce: 
mock_quiesce.side_effect = ( exception.InstanceQuiesceNotSupported( instance_id=self.test_instance['id'], reason='test')) image_meta = objects.ImageMeta.from_dict(test_image_meta) drvr._live_snapshot(self.context, self.test_instance, guest, srcfile, dstfile, "qcow2", "qcow2", image_meta) mock_dom.XMLDesc.assert_called_once_with(flags=( fakelibvirt.VIR_DOMAIN_XML_INACTIVE | fakelibvirt.VIR_DOMAIN_XML_SECURE)) mock_dom.blockRebase.assert_called_once_with( srcfile, dltfile, 0, flags=( fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)) mock_size.assert_called_once_with(srcfile, format="qcow2") mock_backing.assert_called_once_with(srcfile, basename=False, format="qcow2") mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009) mock_chown.assert_called_once_with(dltfile, os.getuid()) mock_snapshot.assert_called_once_with(dltfile, "qcow2", dstfile, "qcow2") mock_define.assert_called_once_with(xmldoc) mock_quiesce.assert_any_call(mock.ANY, self.test_instance, mock.ANY, True) if can_quiesce: mock_quiesce.assert_any_call(mock.ANY, self.test_instance, mock.ANY, False) def test_live_snapshot(self): self._test_live_snapshot() def test_live_snapshot_with_quiesce(self): self._test_live_snapshot(can_quiesce=True) def test_live_snapshot_with_require_quiesce(self): self._test_live_snapshot(can_quiesce=True, require_quiesce=True) def test_live_snapshot_with_require_quiesce_fails(self): self.assertRaises(exception.InstanceQuiesceNotSupported, self._test_live_snapshot, can_quiesce=False, require_quiesce=True) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration") def test_live_migration_hostname_valid(self, mock_lm): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.live_migration(self.context, self.test_instance, "host1.example.com", lambda x: x, lambda x: x) self.assertEqual(1, mock_lm.call_count) @mock.patch.object(libvirt_driver.LibvirtDriver, 
"_live_migration") @mock.patch.object(fake_libvirt_utils, "is_valid_hostname") def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_hostname.return_value = False self.assertRaises(exception.InvalidHostname, drvr.live_migration, self.context, self.test_instance, "foo/?com=/bin/sh", lambda x: x, lambda x: x) def test_live_migration_force_complete(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = fake_instance.fake_instance_obj( None, name='instancename', id=1, uuid='c83a75d4-4d53-4be5-9a40-04d9c0389ff8') drvr.active_migrations[instance.uuid] = deque() drvr.live_migration_force_complete(instance) self.assertEqual( 1, drvr.active_migrations[instance.uuid].count("force-complete")) @mock.patch.object(host.Host, "get_connection") @mock.patch.object(fakelibvirt.virDomain, "abortJob") def test_live_migration_abort(self, mock_abort, mock_conn): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False) guest = libvirt_guest.Guest(dom) with mock.patch.object(nova.virt.libvirt.host.Host, 'get_guest', return_value=guest): drvr.live_migration_abort(self.test_instance) self.assertTrue(mock_abort.called) @mock.patch('os.path.exists', return_value=True) @mock.patch('tempfile.mkstemp') @mock.patch('os.close', return_value=None) def test_check_instance_shared_storage_local_raw(self, mock_close, mock_mkstemp, mock_exists): instance_uuid = str(uuid.uuid4()) self.flags(images_type='raw', group='libvirt') self.flags(instances_path='/tmp') mock_mkstemp.return_value = (-1, '/tmp/{0}/file'.format(instance_uuid)) driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) temp_file = driver.check_instance_shared_storage_local(self.context, instance) self.assertEqual('/tmp/{0}/file'.format(instance_uuid), temp_file['filename']) def 
test_check_instance_shared_storage_local_rbd(self): self.flags(images_type='rbd', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) self.assertIsNone(driver. check_instance_shared_storage_local(self.context, instance)) def test_version_to_string(self): driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) string_ver = driver._version_to_string((4, 33, 173)) self.assertEqual("4.33.173", string_ver) def test_parallels_min_version_fail(self): self.flags(virt_type='parallels', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(driver._conn, 'getLibVersion', return_value=1002011): self.assertRaises(exception.NovaException, driver.init_host, 'wibble') def test_parallels_min_version_ok(self): self.flags(virt_type='parallels', group='libvirt') driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(driver._conn, 'getLibVersion', return_value=1002012): driver.init_host('wibble') def test_get_guest_config_parallels_vm(self): self.flags(virt_type='parallels', group='libvirt') self.flags(images_type='ploop', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus) self.assertEqual(vm_mode.HVM, cfg.os_type) self.assertIsNone(cfg.os_root) self.assertEqual(6, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) 
self.assertEqual(cfg.devices[0].driver_format, "ploop") self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) def test_get_guest_config_parallels_ct_rescue(self): self._test_get_guest_config_parallels_ct(rescue=True) def test_get_guest_config_parallels_ct(self): self._test_get_guest_config_parallels_ct(rescue=False) def _test_get_guest_config_parallels_ct(self, rescue=False): self.flags(virt_type='parallels', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) ct_instance = self.test_instance.copy() ct_instance["vm_mode"] = vm_mode.EXE instance_ref = objects.Instance(**ct_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) if rescue: rescue_data = ct_instance else: rescue_data = None cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {'disk': {}}}, rescue_data) self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertIsNone(cfg.os_root) if rescue: self.assertEqual(5, len(cfg.devices)) else: self.assertEqual(4, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) device_index = 0 fs = cfg.devices[device_index] self.assertEqual(fs.source_type, "file") self.assertEqual(fs.driver_type, "ploop") self.assertEqual(fs.target_dir, "/") if rescue: device_index = 1 fs = cfg.devices[device_index] self.assertEqual(fs.source_type, "file") self.assertEqual(fs.driver_type, "ploop") 
self.assertEqual(fs.target_dir, "/mnt/rescue") self.assertIsInstance(cfg.devices[device_index + 1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[device_index + 2], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[device_index + 3], vconfig.LibvirtConfigGuestVideo) def _test_get_guest_config_parallels_volume(self, vmmode, devices): self.flags(virt_type='parallels', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) ct_instance = self.test_instance.copy() ct_instance["vm_mode"] = vmmode instance_ref = objects.Instance(**ct_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdm = objects.BlockDeviceMapping( self.context, **fake_block_device.FakeDbBlockDeviceDict( {'id': 0, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sda'})) info = {'block_device_mapping': driver_block_device.convert_volumes( [bdm])} info['block_device_mapping'][0]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info, None, info) mock_save.assert_called_once_with() self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus) self.assertEqual(vmmode, cfg.os_type) self.assertIsNone(cfg.os_root) self.assertEqual(devices, len(cfg.devices)) disk_found = False for dev in cfg.devices: result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys) self.assertFalse(result) if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and (dev.source_path is None or 'disk.local' not in dev.source_path)): self.assertEqual("disk", dev.source_device) 
self.assertEqual("sda", dev.target_dev) disk_found = True self.assertTrue(disk_found) def test_get_guest_config_parallels_volume(self): self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4) self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6) def test_get_guest_disk_config_rbd_older_config_drive_fall_back(self): # New config drives are stored in rbd but existing instances have # config drives in the old location under the instances path. # Test that the driver falls back to 'flat' for config drive if it # doesn't exist in rbd. self.flags(images_type='rbd', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() mock_rbd_image = mock.Mock() mock_flat_image = mock.Mock() mock_flat_image.libvirt_info.return_value = mock.sentinel.diskconfig drvr.image_backend.image.side_effect = [mock_rbd_image, mock_flat_image] mock_rbd_image.exists.return_value = False instance = objects.Instance() disk_mapping = {'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'file'}} flavor = objects.Flavor(extra_specs={}) diskconfig = drvr._get_guest_disk_config( instance, 'disk.config', disk_mapping, flavor, drvr._get_disk_config_image_type()) self.assertEqual(2, drvr.image_backend.image.call_count) call1 = mock.call(instance, 'disk.config', 'rbd') call2 = mock.call(instance, 'disk.config', 'flat') drvr.image_backend.image.assert_has_calls([call1, call2]) self.assertEqual(mock.sentinel.diskconfig, diskconfig) def _test_prepare_domain_for_snapshot(self, live_snapshot, state): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance_ref = objects.Instance(**self.test_instance) with mock.patch.object(drvr, "suspend") as mock_suspend: drvr._prepare_domain_for_snapshot( self.context, live_snapshot, state, instance_ref) return mock_suspend.called def test_prepare_domain_for_snapshot(self): # Ensure that suspend() is only called on RUNNING or PAUSED instances for test_power_state in power_state.STATE_MAP.keys(): 
if test_power_state in (power_state.RUNNING, power_state.PAUSED): self.assertTrue(self._test_prepare_domain_for_snapshot( False, test_power_state)) else: self.assertFalse(self._test_prepare_domain_for_snapshot( False, test_power_state)) def test_prepare_domain_for_snapshot_lxc(self): self.flags(virt_type='lxc', group='libvirt') # Ensure that suspend() is never called with LXC for test_power_state in power_state.STATE_MAP.keys(): self.assertFalse(self._test_prepare_domain_for_snapshot( False, test_power_state)) def test_prepare_domain_for_snapshot_live_snapshots(self): # Ensure that suspend() is never called for live snapshots for test_power_state in power_state.STATE_MAP.keys(): self.assertFalse(self._test_prepare_domain_for_snapshot( True, test_power_state)) @mock.patch('os.walk') @mock.patch('os.path.exists') @mock.patch('os.path.getsize') @mock.patch('os.path.isdir') @mock.patch('nova.utils.execute') @mock.patch.object(host.Host, "get_domain") def test_get_instance_disk_info_parallels_ct(self, mock_get_domain, mock_execute, mock_isdir, mock_getsize, mock_exists, mock_walk): dummyxml = ("<domain type='parallels'><name>instance-0000000a</name>" "<os><type>exe</type></os>" "<devices>" "<filesystem type='file'>" "<driver format='ploop' type='ploop'/>" "<source file='/test/disk'/>" "<target dir='/'/></filesystem>" "</devices></domain>") ret = ("image: /test/disk/root.hds\n" "file format: parallels\n" "virtual size: 20G (21474836480 bytes)\n" "disk size: 789M\n") self.flags(virt_type='parallels', group='libvirt') instance = objects.Instance(**self.test_instance) instance.vm_mode = vm_mode.EXE fake_dom = FakeVirtDomain(fake_xml=dummyxml) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.return_value = fake_dom mock_walk.return_value = [('/test/disk', [], ['DiskDescriptor.xml', 'root.hds'])] def getsize_sideeffect(*args, **kwargs): if args[0] == '/test/disk/DiskDescriptor.xml': return 790 if args[0] == '/test/disk/root.hds': return 827326464 
mock_getsize.side_effect = getsize_sideeffect mock_exists.return_value = True mock_isdir.return_value = True mock_execute.return_value = (ret, '') info = drvr.get_instance_disk_info(instance) info = jsonutils.loads(info) self.assertEqual(info[0]['type'], 'ploop') self.assertEqual(info[0]['path'], '/test/disk') self.assertEqual(info[0]['disk_size'], 827327254) self.assertEqual(info[0]['over_committed_disk_size'], 20647509226) self.assertEqual(info[0]['virt_disk_size'], 21474836480) class HostStateTestCase(test.NoDBTestCase): cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686", "features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", "mtrr", "sep", "apic"], "topology": {"cores": "1", "threads": "1", "sockets": "1"}} instance_caps = [(arch.X86_64, "kvm", "hvm"), (arch.I686, "kvm", "hvm")] pci_devices = [{ "dev_id": "pci_0000_04_00_3", "address": "0000:04:10.3", "product_id": '1521', "vendor_id": '8086', "dev_type": fields.PciDeviceType.SRIOV_PF, "phys_function": None}] numa_topology = objects.NUMATopology( cells=[objects.NUMACell( id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) class FakeConnection(libvirt_driver.LibvirtDriver): """Fake connection object.""" def __init__(self): super(HostStateTestCase.FakeConnection, self).__init__(fake.FakeVirtAPI(), True) self._host = host.Host("qemu:///system") def _get_memory_mb_total(): return 497 def _get_memory_mb_used(): return 88 self._host.get_memory_mb_total = _get_memory_mb_total self._host.get_memory_mb_used = _get_memory_mb_used def _get_vcpu_total(self): return 1 def _get_vcpu_used(self): return 0 def _get_cpu_info(self): return HostStateTestCase.cpu_info def _get_disk_over_committed_size_total(self): return 0 def _get_local_gb_info(self): return 
{'total': 100, 'used': 20, 'free': 80} def get_host_uptime(self): return ('10:01:16 up 1:36, 6 users, ' 'load average: 0.21, 0.16, 0.19') def _get_disk_available_least(self): return 13091 def _get_instance_capabilities(self): return HostStateTestCase.instance_caps def _get_pci_passthrough_devices(self): return jsonutils.dumps(HostStateTestCase.pci_devices) def _get_host_numa_topology(self): return HostStateTestCase.numa_topology @mock.patch.object(fakelibvirt, "openAuth") def test_update_status(self, mock_open): mock_open.return_value = fakelibvirt.Connection("qemu:///system") drvr = HostStateTestCase.FakeConnection() stats = drvr.get_available_resource("compute1") self.assertEqual(stats["vcpus"], 1) self.assertEqual(stats["memory_mb"], 497) self.assertEqual(stats["local_gb"], 100) self.assertEqual(stats["vcpus_used"], 0) self.assertEqual(stats["memory_mb_used"], 88) self.assertEqual(stats["local_gb_used"], 20) self.assertEqual(stats["hypervisor_type"], 'QEMU') self.assertEqual(stats["hypervisor_version"], fakelibvirt.FAKE_QEMU_VERSION) self.assertEqual(stats["hypervisor_hostname"], 'compute1') cpu_info = jsonutils.loads(stats["cpu_info"]) self.assertEqual(cpu_info, {"vendor": "Intel", "model": "pentium", "arch": arch.I686, "features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", "mtrr", "sep", "apic"], "topology": {"cores": "1", "threads": "1", "sockets": "1"} }) self.assertEqual(stats["disk_available_least"], 80) self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]), HostStateTestCase.pci_devices) self.assertThat(objects.NUMATopology.obj_from_db_obj( stats['numa_topology'])._to_dict(), matchers.DictMatches( HostStateTestCase.numa_topology._to_dict())) class LibvirtDriverTestCase(test.NoDBTestCase): """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver.""" def setUp(self): super(LibvirtDriverTestCase, self).setUp() os_vif.initialize() self.drvr = libvirt_driver.LibvirtDriver( 
fake.FakeVirtAPI(), read_only=True) self.context = context.get_admin_context() self.test_image_meta = { "disk_format": "raw", } def _create_instance(self, params=None): """Create a test instance.""" if not params: params = {} flavor = objects.Flavor(memory_mb=512, swap=0, vcpu_weight=None, root_gb=10, id=2, name=u'm1.tiny', ephemeral_gb=20, rxtx_factor=1.0, flavorid=u'1', vcpus=1, extra_specs={}) flavor.update(params.pop('flavor', {})) inst = {} inst['id'] = 1 inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b' inst['os_type'] = 'linux' inst['image_ref'] = uuids.fake_image_ref inst['reservation_id'] = 'r-fakeres' inst['user_id'] = 'fake' inst['project_id'] = 'fake' inst['instance_type_id'] = 2 inst['ami_launch_index'] = 0 inst['host'] = 'host1' inst['root_gb'] = flavor.root_gb inst['ephemeral_gb'] = flavor.ephemeral_gb inst['config_drive'] = True inst['kernel_id'] = 2 inst['ramdisk_id'] = 3 inst['key_data'] = 'ABCDEFG' inst['system_metadata'] = {} inst['metadata'] = {} inst['task_state'] = None inst.update(params) instance = fake_instance.fake_instance_obj( self.context, expected_attrs=['metadata', 'system_metadata', 'pci_devices'], flavor=flavor, **inst) # Attributes which we need to be set so they don't touch the db, # but it's not worth the effort to fake properly for field in ['numa_topology', 'vcpu_model']: setattr(instance, field, None) return instance def test_migrate_disk_and_power_off_exception(self): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .migrate_disk_and_power_off. 
""" self.counter = 0 self.checked_shared_storage = False def fake_get_instance_disk_info(instance, block_device_info=None): return '[]' def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' def fake_execute(*args, **kwargs): self.counter += 1 if self.counter == 1: assert False, "intentional failure" def fake_os_path_exists(path): return True def fake_is_storage_shared(dest, inst_base): self.checked_shared_storage = True return False self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) self.stubs.Set(self.drvr, '_is_storage_shared_with', fake_is_storage_shared) self.stubs.Set(utils, 'execute', fake_execute) self.stub_out('os.path.exists', fake_os_path_exists) ins_ref = self._create_instance() flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) self.assertRaises(AssertionError, self.drvr.migrate_disk_and_power_off, context.get_admin_context(), ins_ref, '10.0.0.2', flavor_obj, None) def _test_migrate_disk_and_power_off(self, flavor_obj, block_device_info=None, params_for_instance=None): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .migrate_disk_and_power_off. 
""" instance = self._create_instance(params=params_for_instance) disk_info = fake_disk_info_json(instance) def fake_get_instance_disk_info(instance, block_device_info=None): return disk_info def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' def fake_execute(*args, **kwargs): pass def fake_copy_image(src, dest, host=None, receive=False, on_execute=None, on_completion=None, compression=True): self.assertIsNotNone(on_execute) self.assertIsNotNone(on_completion) self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image) # dest is different host case out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), instance, '10.0.0.2', flavor_obj, None, block_device_info=block_device_info) self.assertEqual(out, disk_info) # dest is same host case out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), instance, '10.0.0.1', flavor_obj, None, block_device_info=block_device_info) self.assertEqual(out, disk_info) def test_migrate_disk_and_power_off(self): flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) self._test_migrate_disk_and_power_off(flavor_obj) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') def test_migrate_disk_and_power_off_boot_from_volume(self, disconnect_volume): info = {'block_device_mapping': [{'boot_index': None, 'mount_device': '/dev/vdd', 'connection_info': None}, {'boot_index': 0, 'mount_device': '/dev/vda', 'connection_info': None}]} flavor = {'root_gb': 1, 'ephemeral_gb': 0} flavor_obj = objects.Flavor(**flavor) # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb. 
        self._test_migrate_disk_and_power_off(
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None,
                                 'flavor': {'root_gb': 1,
                                            'ephemeral_gb': 0}})
        # The boot volume (boot_index 0, /dev/vda) must be disconnected.
        disconnect_volume.assert_called_with(
            info['block_device_mapping'][1]['connection_info'], 'vda')

    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.
        """
        self.copy_or_move_swap_called = False

        # Original instance config
        instance = self._create_instance({'flavor': {'root_gb': 10,
                                                     'ephemeral_gb': 0}})
        disk_info = fake_disk_info_json(instance)
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'

        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True

        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True

        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)

        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)

        mock_get_disk_info.assert_called_once_with(instance,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        # Swap disk must not have been copied or moved by the resize.
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info, out)

    def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.
        """
        instance = self._create_instance()
        disk_info = fake_disk_info_json(instance)

        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return disk_info

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)

        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        # Migration is not implemented for LVM backed instances
        self.assertRaises(expected_exc,
                          self.drvr.migrate_disk_and_power_off,
                          None, instance, '10.0.0.1', flavor_obj, None)

    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    def _test_migrate_disk_and_power_off_backing_file(self,
                                                      shared_storage,
                                                      mock_is_shared_storage,
                                                      mock_get_disk_info,
                                                      mock_destroy,
                                                      mock_execute):
        self.convert_file_called = False
        flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
        flavor_obj = objects.Flavor(**flavor)
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)
        mock_get_disk_info.return_value = disk_info_text
        mock_is_shared_storage.return_value = shared_storage

        def fake_execute(*args, **kwargs):
            # A disk with a backing file must never be flattened via
            # 'qemu-img convert' during the migration.
            self.assertNotEqual(args[0:2], ['qemu-img', 'convert'])

        mock_execute.side_effect = fake_execute

        instance = self._create_instance()

        out = self.drvr.migrate_disk_and_power_off(
            context.get_admin_context(), instance, '10.0.0.2',
            flavor_obj, None)

        self.assertTrue(mock_is_shared_storage.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertEqual(out, disk_info_text)

    def test_migrate_disk_and_power_off_shared_storage(self):
        self._test_migrate_disk_and_power_off_backing_file(True)

    def test_migrate_disk_and_power_off_non_shared_storage(self):
        self._test_migrate_disk_and_power_off_backing_file(False)

    def test_migrate_disk_and_power_off_lvm(self):
        self.flags(images_type='lvm', group='libvirt')

        def fake_execute(*args, **kwargs):
            pass

        self.stubs.Set(utils, 'execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)

    def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()

        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False

        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error(self,
                                                     mock_get_disk_info):
        # Shrinking root (10 -> 5) and ephemeral (20 -> 10) must roll back.
        instance = self._create_instance()
        flavor = {'root_gb': 5, 'ephemeral_gb': 10}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = fake_disk_info_json(instance)

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
            self, mock_get_disk_info):
        # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = fake_disk_info_json(instance)

        self.assertRaises(exception.InstanceFaultRollback,
                          self.drvr.migrate_disk_and_power_off,
                          'ctx', instance, '10.0.0.1', flavor_obj, None)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        # Mixed block-device mappings; only the 'blank'/'local' entries
        # count as ephemeral disks for the resize check.
        mappings = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            },
            {
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': 1,
                'guest_format': None,
                'boot_index': 1,
                'volume_size': 6
            },
            {
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': 1,
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 0,
                'volume_size': 4
            },
            {
                'device_name': '/dev/sda3',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 3
            }
        ]
        mock_get.return_value = mappings

        instance = self._create_instance()

        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = fake_disk_info_json(instance)

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(flavor_obj)

    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_copy_disk_info(self,
                                                              mock_disk_info,
                                                              mock_shared,
                                                              mock_path,
                                                              mock_destroy,
                                                              mock_copy,
                                                              mock_execuate):
        instance = self._create_instance()
        disk_info = fake_disk_info_json(instance)
        disk_info_text = jsonutils.loads(disk_info)
        instance_base = os.path.dirname(disk_info_text[0]['path'])
        flavor = {'root_gb': 10, 'ephemeral_gb': 25}
        flavor_obj = objects.Flavor(**flavor)

        mock_disk_info.return_value = disk_info
        mock_path.return_value = instance_base
        mock_shared.return_value = False

        src_disk_info_path = os.path.join(instance_base + '_resize',
                                          'disk.info')

        with mock.patch.object(os.path, 'exists', autospec=True) \
                as mock_exists:
            # disk.info exists on the source
            mock_exists.side_effect = \
                lambda path: path == src_disk_info_path
            self.drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                                 instance,
                                                 mock.sentinel,
                                                 flavor_obj, None)
            self.assertTrue(mock_exists.called)

        # disk.info must be copied from the '_resize' dir to the new one.
        dst_disk_info_path = os.path.join(instance_base, 'disk.info')
        mock_copy.assert_any_call(src_disk_info_path,
                                  dst_disk_info_path,
                                  host=mock.sentinel,
                                  on_execute=mock.ANY,
                                  on_completion=mock.ANY)

    def test_wait_for_running(self):
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.InstanceNotFound(instance_id=instance['uuid'])
            elif instance['name'] == "running":
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        # instance not found case
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr._wait_for_running,
                          {'name': 'not_found',
                           'uuid': 'not_found_uuid'})

        # instance is running case
        self.assertRaises(loopingcall.LoopingCallDone,
                          self.drvr._wait_for_running,
                          {'name': 'running',
                           'uuid': 'running_uuid'})

        # else case
        self.drvr._wait_for_running({'name': 'else',
                                     'uuid': 'other_uuid'})

    def test_disk_size_from_instance_disk_info(self):
        flavor_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
        inst = objects.Instance(flavor=objects.Flavor(**flavor_data))
        self.assertEqual(10 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, 'disk'))

        self.assertEqual(20 * units.Gi,
                         self.drvr._disk_size_from_instance(inst,
                                                            'disk.local'))

        # 'disk.swap' is reported as size 0 despite swap_gb=30 in the flavor.
        self.assertEqual(0,
                         self.drvr._disk_size_from_instance(inst,
                                                            'disk.swap'))

    @mock.patch('nova.utils.execute')
    def test_disk_raw_to_qcow2(self, mock_execute):
        path = '/test/disk'
        _path_qcow = path + '_qcow'

        self.drvr._disk_raw_to_qcow2(path)
        # Convert to a temp '_qcow' file, then rename over the original.
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'raw',
                      '-O', 'qcow2', path, _path_qcow),
            mock.call('mv', _path_qcow, path)])

    @mock.patch('nova.utils.execute')
    def test_disk_qcow2_to_raw(self, mock_execute):
        path = '/test/disk'
        _path_raw = path + '_raw'

        self.drvr._disk_qcow2_to_raw(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'qcow2',
                      '-O', 'raw', path, _path_raw),
            mock.call('mv', _path_raw, path)])

    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_raw(self, mock_extend):
        image = imgmodel.LocalFileImage("/test/disk",
                                        imgmodel.FORMAT_RAW)

        self.drvr._disk_resize(image, 50)
        mock_extend.assert_called_once_with(image, 50)

    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_extendable')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_image_extendable):

        with test.nested(
                mock.patch.object(
                    self.drvr, '_disk_qcow2_to_raw'),
                mock.patch.object(
                    self.drvr, '_disk_raw_to_qcow2'))\
                as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):

            mock_can_resize.return_value = True
            mock_is_image_extendable.return_value = True

            imageqcow2 = imgmodel.LocalFileImage("/test/disk",
                                                 imgmodel.FORMAT_QCOW2)
            imageraw = imgmodel.LocalFileImage("/test/disk",
                                               imgmodel.FORMAT_RAW)
            self.drvr._disk_resize(imageqcow2, 50)

            # qcow2 images are converted to raw, extended, then converted
            # back to qcow2.
            mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path)
            mock_extend.assert_called_once_with(imageraw, 50)
            mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path)

    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_migration.
        """
        powered_on = power_on

        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False
        create_image_called = [False]

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_image(context, inst, disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True,
                              fallback_from_host=None):
            self.assertFalse(inject_files)
            create_image_called[0] = True

        def fake_create_domain_and_network(
                context, xml, instance, network_info, disk_info,
                block_device_info=None, power_on=True, reboot=False,
                vifs_already_plugged=False, post_xml_callback=None):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)

        def fake_enable_hairpin():
            pass

        def fake_execute(*args, **kwargs):
            pass

        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_disk_resize(image, size):
            # Assert that _create_image is called before disk resize,
            # otherwise we might be trying to resize a disk whose backing
            # file hasn't been fetched, yet.
            self.assertTrue(create_image_called[0])
            self.fake_disk_resize_called = True

        self.flags(use_cow_images=True)
        self.stubs.Set(self.drvr, '_disk_resize', fake_disk_resize)
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.drvr, '_create_image', fake_create_image)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, 'get_info', fake_get_info)

        instance = self._create_instance({'config_drive': str(True)})

        migration = objects.Migration()
        migration.source_compute = 'fake-source-compute'
        migration.dest_compute = 'fake-dest-compute'
        migration.source_node = 'fake-source-node'
        migration.dest_node = 'fake-dest-node'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # Source disks are raw to test conversion
        disk_info = fake_disk_info_json(instance, type='raw')

        with test.nested(
                mock.patch.object(self.drvr, '_disk_raw_to_qcow2',
                                  autospec=True),
                mock.patch.object(self.drvr,
                                  '_ensure_console_log_for_instance')
        ) as (mock_raw_to_qcow2, mock_ensure_console_log):
            self.drvr.finish_migration(
                context.get_admin_context(), migration, instance, disk_info,
                [], image_meta, resize_instance, None, power_on)

            mock_ensure_console_log.assert_called_once_with(instance)

            # Assert that we converted the root and ephemeral disks
            instance_path = libvirt_utils.get_instance_path(instance)
            convert_calls = [mock.call(os.path.join(instance_path, name))
                             for name in ('disk', 'disk.local')]
            mock_raw_to_qcow2.assert_has_calls(convert_calls,
                                               any_order=True)

            # Implicitly assert that we did not convert the config disk
            self.assertEqual(len(convert_calls),
                             mock_raw_to_qcow2.call_count)

        self.assertTrue(self.fake_create_domain_called)
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)

    def test_finish_migration_resize(self):
        self._test_finish_migration(True, resize_instance=True)

    def test_finish_migration_power_on(self):
        self._test_finish_migration(True)

    def test_finish_migration_power_off(self):
        self._test_finish_migration(False)

    def _test_finish_revert_migration(self, power_on):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_revert_migration.
        """
        powered_on = power_on

        self.fake_create_domain_called = False

        def fake_execute(*args, **kwargs):
            pass

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_domain(context, xml, instance, network_info,
                               disk_info, block_device_info=None,
                               power_on=None,
                               vifs_already_plugged=None):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)
            return mock.MagicMock()

        def fake_enable_hairpin():
            pass

        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None):
            return ""

        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)
        self.stubs.Set(utils, 'get_image_from_system_metadata',
                       lambda *a: self.test_image_meta)

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            ins_ref = self._create_instance()
            os.mkdir(os.path.join(tmpdir, ins_ref['name']))
            libvirt_xml_path = os.path.join(tmpdir,
                                            ins_ref['name'],
                                            'libvirt.xml')
            # An empty libvirt.xml just needs to exist for the revert.
            f = open(libvirt_xml_path, 'w')
            f.close()

            self.drvr.finish_revert_migration(
                context.get_admin_context(), ins_ref,
                [], None, power_on)
            self.assertTrue(self.fake_create_domain_called)

    def test_finish_revert_migration_power_on(self):
        self._test_finish_revert_migration(True)

    def test_finish_revert_migration_power_off(self):
        self._test_finish_revert_migration(False)

    def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                                  del_inst_failed=False):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        context = 'fake_context'

        ins_ref = self._create_instance()

        with test.nested(
                mock.patch.object(os.path, 'exists',
                                  return_value=backup_made),
                mock.patch.object(libvirt_utils, 'get_instance_path'),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml'),
                mock.patch.object(shutil, 'rmtree'),
                mock.patch.object(loopingcall, 'FixedIntervalLoopingCall'),
        ) as (mock_stat, mock_path, mock_exec, mock_cdn, mock_ggx,
              mock_rmtree, mock_looping_call):
            mock_path.return_value = '/fake/foo'
            if del_inst_failed:
                mock_rmtree.side_effect = OSError(errno.ENOENT,
                                                  'test exception')
            drvr.finish_revert_migration(context, ins_ref, [])
            if backup_made:
                # The '_resize' backup dir is moved back into place.
                mock_exec.assert_called_once_with('mv', '/fake/foo_resize',
                                                  '/fake/foo')
            else:
                self.assertFalse(mock_exec.called)

    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(backup_made=True)

    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(backup_made=True)

    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(backup_made=False)

    def test_finish_revert_migration_after_crash_delete_failed(self):
        self._test_finish_revert_migration_after_crash(backup_made=True,
                                                       del_inst_failed=True)

    def test_finish_revert_migration_preserves_disk_bus(self):

        def fake_get_guest_xml(context, instance, network_info, disk_info,
                               image_meta, block_device_info=None):
            self.assertEqual('ide', disk_info['disk_bus'])

        image_meta = {"disk_format": "raw",
                      "properties": {"hw_disk_bus": "ide"}}
        instance = self._create_instance()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(utils, 'get_image_from_system_metadata',
                                  return_value=image_meta),
                mock.patch.object(drvr, '_get_guest_xml',
                                  side_effect=fake_get_guest_xml)):
            drvr.finish_revert_migration('', instance, None, power_on=False)

    def test_finish_revert_migration_snap_backend(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        ins_ref = self._create_instance()

        with test.nested(
                mock.patch.object(utils, 'get_image_from_system_metadata'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml')) as (
                mock_image, mock_cdn, mock_ggx):
            mock_image.return_value = {'disk_format': 'raw'}
            drvr.finish_revert_migration('', ins_ref, None, power_on=False)

            # Revert rolls back to the resize snapshot and then deletes it.
            drvr.image_backend.rollback_to_snap.assert_called_once_with(
                    libvirt_utils.RESIZE_SNAPSHOT_NAME)
            drvr.image_backend.remove_snap.assert_called_once_with(
                    libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)

    def test_finish_revert_migration_snap_backend_snapshot_not_found(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        ins_ref = self._create_instance()

        with test.nested(
                mock.patch.object(rbd_utils, 'RBDDriver'),
                mock.patch.object(utils, 'get_image_from_system_metadata'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml')) as (
                mock_rbd, mock_image, mock_cdn, mock_ggx):
            mock_image.return_value = {'disk_format': 'raw'}
            mock_rbd.rollback_to_snap.side_effect = \
                exception.SnapshotNotFound(snapshot_id='testing')
            drvr.finish_revert_migration('', ins_ref, None, power_on=False)
            # Even when the rollback snapshot is missing, removal of the
            # resize snapshot is still attempted (with ignore_errors).
            drvr.image_backend.remove_snap.assert_called_once_with(
                    libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)

    def test_finish_revert_migration_snap_backend_image_does_not_exist(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        drvr.image_backend.exists.return_value = False
        ins_ref = self._create_instance()

        with test.nested(
                mock.patch.object(rbd_utils, 'RBDDriver'),
                mock.patch.object(utils, 'get_image_from_system_metadata'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml')) as (
                mock_rbd, mock_image, mock_cdn, mock_ggx):
            mock_image.return_value = {'disk_format': 'raw'}
            drvr.finish_revert_migration('', ins_ref, None, power_on=False)
            self.assertFalse(drvr.image_backend.rollback_to_snap.called)
            self.assertFalse(drvr.image_backend.remove_snap.called)

    def test_cleanup_failed_migration(self):
        self.mox.StubOutWithMock(shutil, 'rmtree')
        shutil.rmtree('/fake/inst')
        self.mox.ReplayAll()
        self.drvr._cleanup_failed_migration('/fake/inst')

    def test_confirm_migration(self):
        ins_ref = self._create_instance()

        self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self, 1))

        self.mox.ReplayAll()
        self.drvr.confirm_migration("migration_ref", ins_ref,
                                    _fake_network_info(self, 1))

    def test_cleanup_resize_same_host(self):
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend

        with test.nested(
                mock.patch.object(os.path, 'exists'),
                mock.patch.object(libvirt_utils,
                                  'get_instance_path'),
                mock.patch.object(utils, 'execute')) as (
                mock_exists, mock_get_path, mock_exec):
            mock_exists.return_value = True
            mock_get_path.return_value = '/fake/inst'
            drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))
            mock_get_path.assert_called_once_with(ins_ref)
            mock_exec.assert_called_once_with('rm', '-rf',
                                              '/fake/inst_resize',
                                              delay_on_retry=True,
                                              attempts=5)

    def test_cleanup_resize_not_same_host(self):
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        host = 'not' + CONF.host
        ins_ref = self._create_instance({'host': host})
        fake_net = _fake_network_info(self, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend

        with test.nested(
                mock.patch.object(os.path, 'exists'),
                mock.patch.object(libvirt_utils, 'get_instance_path'),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr, '_undefine_domain'),
                mock.patch.object(drvr, 'unplug_vifs'),
                mock.patch.object(drvr, 'unfilter_instance')
        ) as (mock_exists, mock_get_path, mock_exec, mock_undef,
              mock_unplug, mock_unfilter):
            mock_exists.return_value = True
            mock_get_path.return_value = '/fake/inst'
            drvr._cleanup_resize(ins_ref, fake_net)
            mock_get_path.assert_called_once_with(ins_ref)
            mock_exec.assert_called_once_with('rm', '-rf',
                                              '/fake/inst_resize',
                                              delay_on_retry=True,
                                              attempts=5)
            # When the instance no longer lives on this host, the domain is
            # undefined and vifs/filters are torn down as well.
            mock_undef.assert_called_once_with(ins_ref)
            mock_unplug.assert_called_once_with(ins_ref, fake_net)
            mock_unfilter.assert_called_once_with(ins_ref, fake_net)

    def test_cleanup_resize_snap_backend(self):
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend

        with test.nested(
                mock.patch.object(os.path, 'exists'),
                mock.patch.object(libvirt_utils, 'get_instance_path'),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr.image_backend, 'remove_snap')) as (
                mock_exists, mock_get_path, mock_exec, mock_remove):
            mock_exists.return_value = True
            mock_get_path.return_value = '/fake/inst'
            drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))
            mock_get_path.assert_called_once_with(ins_ref)
            mock_exec.assert_called_once_with('rm', '-rf',
                                              '/fake/inst_resize',
                                              delay_on_retry=True,
                                              attempts=5)
            mock_remove.assert_called_once_with(
                libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)

    def test_cleanup_resize_snap_backend_image_does_not_exist(self):
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        drvr.image_backend.exists.return_value = False

        with test.nested(
                mock.patch.object(os.path, 'exists'),
                mock.patch.object(libvirt_utils, 'get_instance_path'),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr.image_backend, 'remove_snap')) as (
                mock_exists, mock_get_path, mock_exec, mock_remove):
            mock_exists.return_value = True
            mock_get_path.return_value = '/fake/inst'
            drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))
            mock_get_path.assert_called_once_with(ins_ref)
            mock_exec.assert_called_once_with('rm', '-rf',
                                              '/fake/inst_resize',
                                              delay_on_retry=True,
                                              attempts=5)
            # No backend image -> no snapshot to remove.
            self.assertFalse(mock_remove.called)

    def test_get_instance_disk_info_exception(self):
        instance = self._create_instance()

        class FakeExceptionDomain(FakeVirtDomain):
            def __init__(self):
                super(FakeExceptionDomain, self).__init__()

            def XMLDesc(self, flags):
                raise fakelibvirt.libvirtError("Libvirt error")

        def fake_get_domain(self, instance):
            return FakeExceptionDomain()

        self.stubs.Set(host.Host, 'get_domain',
                       fake_get_domain)
        # A libvirt error while fetching the XML is surfaced as
        # InstanceNotFound.
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr.get_instance_disk_info,
                          instance)

    @mock.patch('os.path.exists')
    @mock.patch.object(lvm, 'list_volumes')
    def test_lvm_disks(self, listlvs, exists):
        instance = objects.Instance(uuid=uuids.instance, id=1)
        self.flags(images_volume_group='vols', group='libvirt')
        exists.return_value = True
        listlvs.return_value = ['%s_foo' % uuids.instance,
                                'other-uuid_foo']
        disks = self.drvr._lvm_disks(instance)
        # Only volumes prefixed with this instance's uuid are returned.
        self.assertEqual(['/dev/vols/%s_foo' % uuids.instance], disks)

    def test_is_booted_from_volume(self):
        func = libvirt_driver.LibvirtDriver._is_booted_from_volume
        instance, disk_mapping = {}, {}

        self.assertTrue(func(instance, disk_mapping))
        disk_mapping['disk'] = 'map'
        self.assertTrue(func(instance, disk_mapping))

        instance['image_ref'] = 'uuid'
        self.assertFalse(func(instance, disk_mapping))

    @mock.patch(
        'nova.virt.libvirt.driver.LibvirtDriver._try_fetch_image_cache')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._inject_data')
    @mock.patch('nova.virt.libvirt.driver.imagecache')
    def test_data_not_injects_with_configdrive(self, mock_image, mock_inject,
                                               mock_fetch):
        self.flags(inject_partition=-1, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # config_drive is True by default, configdrive.required_by()
        # returns True
        instance_ref = self._create_instance()
        disk_images = {'image_id': None}

        drvr._create_and_inject_local_root(self.context, instance_ref, False,
                                           '', disk_images, [], None, [],
                                           True, None)
        self.assertFalse(mock_inject.called)

    @mock.patch('nova.virt.netutils.get_injected_network_template')
    @mock.patch('nova.virt.disk.api.inject_data')
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
    def _test_inject_data(self, driver_params, path, disk_params,
                          mock_conn, disk_inject_data, inj_network,
                          called=True):
        class ImageBackend(object):
            path = '/path'

            def get_model(self, connection):
                return imgmodel.LocalFileImage(self.path,
                                               imgmodel.FORMAT_RAW)

        def fake_inj_network(*args, **kwds):
            return args[0] or None
        inj_network.side_effect = fake_inj_network

        image_backend = ImageBackend()
        image_backend.path = path

        with mock.patch.object(
                self.drvr.image_backend, 'image',
                return_value=image_backend):
            self.flags(inject_partition=0, group='libvirt')

            self.drvr._inject_data(image_backend, **driver_params)

            if called:
                disk_inject_data.assert_called_once_with(
                    mock.ANY, *disk_params,
                    partition=None, mandatory=('files',))

            self.assertEqual(disk_inject_data.called, called)

    def _test_inject_data_default_driver_params(self, **params):
        # Baseline kwargs for _inject_data; individual tests override one.
        return {
            'instance': self._create_instance(params=params),
            'network_info': None,
            'admin_pass': None,
            'files': None
        }

    def test_inject_data_adminpass(self):
        self.flags(inject_password=True, group='libvirt')
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['admin_pass'] = 'foobar'
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            'foobar',  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

        # Test with the configuration setted to false.
        self.flags(inject_password=False, group='libvirt')
        self._test_inject_data(driver_params, "/path", disk_params,
                               called=False)

    def test_inject_data_key(self):
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['instance']['key_data'] = 'key-content'

        self.flags(inject_key=True, group='libvirt')
        disk_params = [
            'key-content',  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

        # Test with the configuration setted to false.
        self.flags(inject_key=False, group='libvirt')
        self._test_inject_data(driver_params, "/path", disk_params,
                               called=False)

    def test_inject_data_metadata(self):
        instance_metadata = {'metadata': {'data': 'foo'}}
        driver_params = self._test_inject_data_default_driver_params(
            **instance_metadata
        )
        disk_params = [
            None,  # key
            None,  # net
            {'data': 'foo'},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_data_files(self):
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['files'] = ['file1', 'file2']
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            ['file1', 'file2'],  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_data_net(self):
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['network_info'] = {'net': 'eno1'}
        disk_params = [
            None,  # key
            {'net': 'eno1'},  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_not_exist_image(self):
        driver_params = self._test_inject_data_default_driver_params()
        disk_params = [
            'key-content',  # key
            None,  # net
            None,  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/fail/path", disk_params,
                               called=False)

    def _test_attach_detach_interface(self, method, power_state,
                                      expected_flags):
        instance = self._create_instance()
        network_info = _fake_network_info(self, 1)
        domain = FakeVirtDomain()
        self.mox.StubOutWithMock(host.Host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr.firewall_driver,
                                 'setup_basic_filtering')
        self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
        self.mox.StubOutWithMock(domain, 'info')

        host.Host.get_domain(instance).AndReturn(domain)
        if method == 'attach_interface':
            self.drvr.firewall_driver.setup_basic_filtering(
                instance, [network_info[0]])

        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance.image_ref})

        # Capture the real vif config before get_config is stubbed, so the
        # recorded expectation below can return it.
        expected = self.drvr.vif_driver.get_config(
            instance, network_info[0], fake_image_meta, instance.flavor,
            CONF.libvirt.virt_type, self.drvr._host)
        self.mox.StubOutWithMock(self.drvr.vif_driver, 'get_config')
        self.drvr.vif_driver.get_config(
            instance, network_info[0],
            mox.IsA(objects.ImageMeta),
            mox.IsA(objects.Flavor),
            CONF.libvirt.virt_type,
            self.drvr._host).AndReturn(expected)
        domain.info().AndReturn([power_state, 1, 2, 3, 4])

        if method == 'attach_interface':
            domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
        elif method == 'detach_interface':
            domain.detachDeviceFlags(expected.to_xml(), expected_flags)

        self.mox.ReplayAll()
        if method == 'attach_interface':
            self.drvr.attach_interface(
                instance, fake_image_meta, network_info[0])
        elif method == 'detach_interface':
            self.drvr.detach_interface(
                instance, network_info[0])
        self.mox.VerifyAll()

    def test_attach_interface_with_running_instance(self):
        self._test_attach_detach_interface(
            'attach_interface', power_state.RUNNING,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_attach_interface_with_pause_instance(self):
        self._test_attach_detach_interface(
            'attach_interface', power_state.PAUSED,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_attach_interface_with_shutdown_instance(self):
        self._test_attach_detach_interface(
            'attach_interface', power_state.SHUTDOWN,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))

    def test_detach_interface_with_running_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.RUNNING,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_detach_interface_with_pause_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.PAUSED,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_detach_interface_with_shutdown_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.SHUTDOWN,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))

    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_detach_interface_device_not_found(self, mock_log):
        # Asserts that we don't log an error when the interface device is not
        # found on the guest after a libvirt error during detach.
        instance = self._create_instance()
        vif = _fake_network_info(self, 1)[0]
        guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest')
        guest.get_power_state = mock.Mock()
        self.drvr._host.get_guest = mock.Mock(return_value=guest)
        self.drvr.vif_driver = mock.Mock()
        # Simulate libvirt failing the detach because the device is gone.
        error = fakelibvirt.libvirtError(
            'no matching network device was found')
        error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,)
        guest.detach_device = mock.Mock(side_effect=error)
        # mock out that get_interface_by_mac doesn't find the interface
        guest.get_interface_by_mac = mock.Mock(return_value=None)
        self.drvr.detach_interface(instance, vif)
        guest.get_interface_by_mac.assert_called_once_with(vif['address'])
        # an error shouldn't be logged, but a warning should be logged
        self.assertFalse(mock_log.error.called)
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertIn('the device is no longer found on the guest',
                      six.text_type(mock_log.warning.call_args[0]))

    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    # NOTE(mdbooth): The following 4 mocks are required to execute
    #                get_guest_xml().
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_supports_direct_io')
    @mock.patch('nova.api.metadata.base.InstanceMetadata')
    def _test_rescue(self, instance,
                     mock_instance_metadata, mock_supports_direct_io,
                     mock_build_device_metadata, mock_set_host_enabled,
                     mock_write_to_file,
                     exists=None):
        # Common driver.rescue() harness: boots the instance into rescue and
        # returns the fake image backend plus the parsed domain XML so
        # callers can assert on the created disks and the guest config.
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        mock_build_device_metadata.return_value = None
        mock_supports_direct_io.return_value = True

        backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(exists=exists))

        image_meta = objects.ImageMeta.from_dict(
            {'id': uuids.image_id, 'name': 'fake'})
        network_info = _fake_network_info(self, 1)
        rescue_password = 'fake_password'

        domain_xml = [None]

        def fake_create_domain(xml=None, domain=None, power_on=True,
                               pause=False, post_xml_callback=None):
            # Capture the XML handed to _create_domain for later inspection.
            domain_xml[0] = xml
            if post_xml_callback is not None:
                post_xml_callback()

        with mock.patch.object(
                self.drvr, '_create_domain',
                side_effect=fake_create_domain) as mock_create_domain:
            self.drvr.rescue(self.context, instance,
                             network_info, image_meta, rescue_password)

            self.assertTrue(mock_create_domain.called)

            return backend, etree.fromstring(domain_xml[0])

    def test_rescue(self):
        instance = self._create_instance({'config_drive': None})
        backend, doc = self._test_rescue(instance)

        # Assert that we created the expected set of disks, and no others
        self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
                         sorted(backend.created_disks.keys()))

        disks = backend.disks

        kernel_ramdisk = [disks[name + '.rescue']
                          for name in ('kernel', 'ramdisk')]

        # Assert that kernel and ramdisk were both created as raw
        for disk in kernel_ramdisk:
            self.assertEqual('raw', disk.image_type)

        # Assert that the root rescue disk was created as the default type
        self.assertIsNone(disks['disk.rescue'].image_type)

        # We expect the generated domain to contain disk.rescue and
        # disk, in that order
        expected_domain_disk_paths = map(
            lambda name: disks[name].path, ('disk.rescue', 'disk'))
        domain_disk_paths = doc.xpath('devices/disk/source/@file')
        self.assertEqual(expected_domain_disk_paths, domain_disk_paths)

        # The generated domain xml should contain the rescue kernel
        # and ramdisk
        expected_kernel_ramdisk_paths = map(
            lambda disk: os.path.join(CONF.instances_path, disk.path),
            kernel_ramdisk)
        kernel_ramdisk_paths = \
            doc.xpath('os/*[self::initrd|self::kernel]/text()')
        self.assertEqual(expected_kernel_ramdisk_paths,
                         kernel_ramdisk_paths)

    def test_rescue_config_drive(self):
        # Same as test_rescue, but the instance has a config drive which
        # must be imported as a raw rescue disk.
        instance = self._create_instance({'config_drive': str(True)})
        backend, doc = self._test_rescue(
            instance, exists=lambda name: name != 'disk.config.rescue')

        # Assert that we created the expected set of disks, and no others
        self.assertEqual(['disk.config.rescue', 'disk.rescue',
                          'kernel.rescue', 'ramdisk.rescue'],
                         sorted(backend.created_disks.keys()))

        disks = backend.disks

        config_disk = disks['disk.config.rescue']
        kernel_ramdisk = [disks[name + '.rescue']
                          for name in ('kernel', 'ramdisk')]

        # Assert that we imported the config disk
        self.assertTrue(config_disk.import_file.called)

        # Assert that the config disk, kernel and ramdisk were created as raw
        for disk in [config_disk] + kernel_ramdisk:
            self.assertEqual('raw', disk.image_type)

        # Assert that the root rescue disk was created as the default type
        self.assertIsNone(disks['disk.rescue'].image_type)

        # We expect the generated domain to contain disk.rescue, disk, and
        # disk.config.rescue in that order
        expected_domain_disk_paths = map(
            lambda name: disks[name].path,
            ('disk.rescue', 'disk', 'disk.config.rescue'))
        domain_disk_paths = doc.xpath('devices/disk/source/@file')
        self.assertEqual(expected_domain_disk_paths, domain_disk_paths)

        # The generated domain xml should contain the rescue kernel
        # and ramdisk
        expected_kernel_ramdisk_paths = map(
            lambda disk: os.path.join(CONF.instances_path, disk.path),
            kernel_ramdisk)
        kernel_ramdisk_paths = \
            doc.xpath('os/*[self::initrd|self::kernel]/text()')
        self.assertEqual(expected_kernel_ramdisk_paths,
                         kernel_ramdisk_paths)

    @mock.patch.object(libvirt_utils, 'get_instance_path')
    @mock.patch.object(libvirt_utils, 'load_file')
    @mock.patch.object(host.Host, "get_domain")
    def test_unrescue(self, mock_get_domain, mock_load_file,
                      mock_get_instance_path):
        # Unrescue must restore the saved domain XML and clean up every
        # rescue artefact: unrescue.xml, rescue files/dirs and LVM volumes.
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='block' device='disk'>"
                    "<source dev='/dev/some-vg/some-lv'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "</devices></domain>")

        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)
        fake_dom = FakeVirtDomain(fake_xml=dummyxml)
        mock_get_domain.return_value = fake_dom
        mock_load_file.return_value = "fake_unrescue_xml"
        unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
        xml_path = os.path.join('/path', 'libvirt.xml')
        rescue_file = os.path.join('/path', 'rescue.file')
        rescue_dir = os.path.join('/path', 'rescue.dir')

        def isdir_sideeffect(*args, **kwargs):
            # Only rescue.dir is treated as a directory; rescue.file is not.
            if args[0] == '/path/rescue.file':
                return False
            if args[0] == '/path/rescue.dir':
                return True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
                mock.patch.object(libvirt_utils, 'write_to_file'),
                mock.patch.object(drvr, '_destroy'),
                mock.patch.object(drvr, '_create_domain'),
                mock.patch.object(libvirt_utils, 'file_delete'),
                mock.patch.object(shutil, 'rmtree'),
                mock.patch.object(os.path, "isdir",
                                  side_effect=isdir_sideeffect),
                mock.patch.object(drvr, '_lvm_disks',
                                  return_value=['lvm.rescue']),
                mock.patch.object(lvm, 'remove_volumes'),
                mock.patch.object(glob, 'iglob',
                                  return_value=[rescue_file, rescue_dir])
                ) as (mock_write, mock_destroy, mock_create, mock_del,
                      mock_rmtree, mock_isdir, mock_lvm_disks,
                      mock_remove_volumes, mock_glob):
            drvr.unrescue(instance, None)
            mock_write.assert_called_once_with(xml_path, "fake_unrescue_xml")
            mock_destroy.assert_called_once_with(instance)
            mock_create.assert_called_once_with("fake_unrescue_xml",
                                                fake_dom)
            # unrescue.xml and rescue.file are deleted; rescue.dir is
            # removed recursively; the rescue LVM volume is removed.
            self.assertEqual(2, mock_del.call_count)
            self.assertEqual(unrescue_xml_path,
                             mock_del.call_args_list[0][0][0])
            self.assertEqual(1, mock_rmtree.call_count)
            self.assertEqual(rescue_dir, mock_rmtree.call_args_list[0][0][0])
            self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
            mock_remove_volumes.assert_called_once_with(['lvm.rescue'])

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files(self, get_instance_path, exists, exe,
                                   shutil):
        # Happy path: the instance dir is renamed to <path>_del then removed.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('os.kill')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_kill_running(
            self, get_instance_path, kill, exists, exe, shutil):
        # Outstanding tracked jobs (pids 3 and 4) are SIGKILLed before the
        # files are deleted, and the job tracker entry is dropped.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)
        self.drvr.job_tracker.jobs[instance.uuid] = [3, 4]

        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        kill.assert_has_calls([mock.call(3, signal.SIGKILL), mock.call(3, 0),
                               mock.call(4, signal.SIGKILL), mock.call(4, 0)])
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
        self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resize(self, get_instance_path, exists,
                                          exe, shutil):
        # The first 'mv' fails, so the driver falls back to the _resize dir.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = [Exception(), None]
        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')]
        self.assertEqual(expected, exe.mock_calls)
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_failed(self, get_instance_path, exists,
                                          exe, shutil):
        # The path still exists after cleanup -> deletion reported failed.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        exists.side_effect = [False, False, True, True]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertFalse(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_mv_failed(self, get_instance_path,
                                             exists, exe, shutil):
        # Every 'mv' attempt fails on both passes -> failure is reported.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [True, True]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertFalse(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resume(self, get_instance_path, exists,
                                          exe, shutil):
        # 'mv' fails but the path disappears anyway -> counted as success.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_none(self, get_instance_path, exists,
                                        exe, shutil):
        # Nothing exists to delete: no rmtree call, still a success.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, False, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertEqual(0, len(shutil.mock_calls))
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_concurrent(self, get_instance_path,
                                              exists, exe, shutil):
        # A concurrent delete makes the first two 'mv's fail; the retried
        # first 'mv' then succeeds.
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = [Exception(), Exception(), None]
        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')]
        expected.append(expected[0])
        self.assertEqual(expected, exe.mock_calls)
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)

    def _assert_on_id_map(self, idmap, klass, start, target, count):
        # Helper: check the class and fields of a single uid/gid map entry.
        self.assertIsInstance(idmap, klass)
        self.assertEqual(start, idmap.start)
        self.assertEqual(target, idmap.target)
        self.assertEqual(count, idmap.count)

    def test_get_id_maps(self):
        self.flags(virt_type="lxc", group="libvirt")
        CONF.libvirt.virt_type = "lxc"
        CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
        CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

        idmaps = self.drvr._get_guest_idmaps()

        # Two UID maps followed by two GID maps.
        self.assertEqual(len(idmaps), 4)
        self._assert_on_id_map(idmaps[0],
                               vconfig.LibvirtConfigGuestUIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[1],
                               vconfig.LibvirtConfigGuestUIDMap,
                               1, 20000, 10)
        self._assert_on_id_map(idmaps[2],
                               vconfig.LibvirtConfigGuestGIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[3],
                               vconfig.LibvirtConfigGuestGIDMap,
                               1, 20000, 10)

    def test_get_id_maps_not_lxc(self):
        # idmaps only apply to lxc guests; other virt types get none.
        CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
        CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

        idmaps = self.drvr._get_guest_idmaps()

        self.assertEqual(0, len(idmaps))

    def test_get_id_maps_only_uid(self):
        self.flags(virt_type="lxc", group="libvirt")
        CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
        CONF.libvirt.gid_maps = []

        idmaps = self.drvr._get_guest_idmaps()

        self.assertEqual(2, len(idmaps))
        self._assert_on_id_map(idmaps[0],
                               vconfig.LibvirtConfigGuestUIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[1],
                               vconfig.LibvirtConfigGuestUIDMap,
                               1, 20000, 10)

    def test_get_id_maps_only_gid(self):
        self.flags(virt_type="lxc", group="libvirt")
        CONF.libvirt.uid_maps = []
        CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

        idmaps = self.drvr._get_guest_idmaps()

        self.assertEqual(2, len(idmaps))
        self._assert_on_id_map(idmaps[0],
                               vconfig.LibvirtConfigGuestGIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[1],
                               vconfig.LibvirtConfigGuestGIDMap,
                               1, 20000, 10)

    def test_instance_on_disk(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(uuid=uuids.instance, id=1)
        self.assertFalse(drvr.instance_on_disk(instance))

    def test_instance_on_disk_rbd(self):
        # rbd-backed instances are always considered on shared storage.
        self.flags(images_type='rbd', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(uuid=uuids.instance, id=1)
        self.assertTrue(drvr.instance_on_disk(instance))

    def test_get_disk_xml(self):
        # Guest.get_disk('vdX') must return the matching <disk> element.
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="file">
                     <source file="disk1_file"/>
                     <target dev="vda" bus="virtio"/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type="block">
                    <source dev="/path/to/dev/1"/>
                    <target dev="vdb" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """

        diska_xml = """<disk type="file" device="disk">
  <source file="disk1_file"/>
  <target bus="virtio" dev="vda"/>
  <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""

        diskb_xml = """<disk type="block" device="disk">
  <source dev="/path/to/dev/1"/>
  <target bus="virtio" dev="vdb"/>
</disk>"""

        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)

        # NOTE(gcb): etree.tostring(node) returns an extra line with
        # some white spaces, need to strip it.
        actual_diska_xml = guest.get_disk('vda').to_xml()
        self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())

        actual_diskb_xml = guest.get_disk('vdb').to_xml()
        self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())

        # Unknown device names return None rather than raising.
        self.assertIsNone(guest.get_disk('vdc'))

    def test_vcpu_model_from_config(self):
        # libvirt guest CPU config -> VirtCPUModel object conversion.
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # None input passes straight through as None.
        vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
        self.assertIsNone(vcpu_model)

        cpu = vconfig.LibvirtConfigGuestCPU()
        feature1 = vconfig.LibvirtConfigGuestCPUFeature()
        feature2 = vconfig.LibvirtConfigGuestCPUFeature()
        feature1.name = 'sse'
        feature1.policy = cpumodel.POLICY_REQUIRE
        feature2.name = 'aes'
        feature2.policy = cpumodel.POLICY_REQUIRE

        cpu.features = set([feature1, feature2])
        cpu.mode = cpumodel.MODE_CUSTOM
        cpu.sockets = 1
        cpu.cores = 2
        cpu.threads = 4
        vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
        self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match)
        self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode)
        self.assertEqual(4, vcpu_model.topology.threads)
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in vcpu_model.features]))

        # Passing an existing model updates it in place and returns it.
        cpu.mode = cpumodel.MODE_HOST_MODEL
        vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode)
        self.assertEqual(vcpu_model, vcpu_model_1)

    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    @mock.patch.object(objects.Instance, 'save')
    def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain,
                                   mock_unfilter, mock_delete_volume,
                                   mock_get_guest, mock_get_size):
        # cleanup() must remove the dmcrypt mapping of an encrypted LVM disk.
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(
            uuid=uuids.instance, id=1,
            ephemeral_key_uuid=uuids.ephemeral_key_uuid)
        instance.system_metadata = {}
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}
        self.flags(images_type="lvm",
                   group='libvirt')
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="block">
                    <driver name='qemu' type='raw' cache='none'/>
                    <source dev="/dev/mapper/fake-dmcrypt"/>
                    <target dev="vda" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv.cleanup(self.context, instance, 'fake_network',
                    destroy_vifs=False,
                    block_device_info=block_device_info)
        mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt')

    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest,
                          mock_size, encrypted=False):
        # _cleanup_lvm only deletes the dmcrypt mapping when the backing
        # device name carries the dmcrypt suffix.
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(
            uuid=uuids.instance, id=1,
            ephemeral_key_uuid=uuids.ephemeral_key_uuid)
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}
        dev_name = 'fake-dmcrypt' if encrypted else 'fake'
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="block">
                    <driver name='qemu' type='raw' cache='none'/>
                    <source dev="/dev/mapper/%s"/>
                    <target dev="vda" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """ % dev_name
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv._cleanup_lvm(instance, block_device_info)

        if encrypted:
            mock_delete_volume.assert_called_once_with(
                '/dev/mapper/fake-dmcrypt')
        else:
            self.assertFalse(mock_delete_volume.called)

    def test_cleanup_lvm(self):
        self._test_cleanup_lvm()

    def test_cleanup_encrypted_lvm(self):
        self._test_cleanup_lvm(encrypted=True)

    def test_vcpu_model_to_config(self):
        # VirtCPUModel object -> libvirt guest CPU config conversion.
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE,
                                         name='sse')
        feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID,
                                           name='aes')
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
        vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL,
                                          features=[feature, feature_1],
                                          topology=topo)

        cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode)
        self.assertEqual(1, cpu.sockets)
        self.assertEqual(4, cpu.threads)
        self.assertEqual(2, len(cpu.features))
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in cpu.features]))
        self.assertEqual(set([cpumodel.POLICY_REQUIRE,
                              cpumodel.POLICY_FORBID]),
                         set([f.policy for f in cpu.features]))

    def test_trigger_crash_dump(self):
        # Happy path: inject_nmi on the guest succeeds.
        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.drvr.trigger_crash_dump(instance)

    def test_trigger_crash_dump_not_running(self):
        # libvirt "domain is not running" maps to InstanceNotRunning.
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                'Requested operation is not valid: domain is not running',
                error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.inject_nmi = mock.Mock(side_effect=ex)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(exception.InstanceNotRunning,
                              self.drvr.trigger_crash_dump, instance)

    def test_trigger_crash_dump_not_supported(self):
        # libvirt VIR_ERR_NO_SUPPORT maps to TriggerCrashDumpNotSupported.
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                '',
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.inject_nmi = mock.Mock(side_effect=ex)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(exception.TriggerCrashDumpNotSupported,
                              self.drvr.trigger_crash_dump, instance)

    def test_trigger_crash_dump_unexpected_error(self):
        # Any other libvirt error is re-raised unchanged.
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                'UnexpectedError',
                error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.inject_nmi = mock.Mock(side_effect=ex)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(fakelibvirt.libvirtError,
                              self.drvr.trigger_crash_dump, instance)


class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )

        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        # Each BDM gets the (rd_req, rd_bytes, wr_req, wr_bytes) block stats.
        def fake_block_stats(instance_name, disk):
            return (169, 688640, 0, 0, -1)

        self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0},
                           {'volume': 2,
                            'instance': self.ins_ref,
                            'rd_bytes': 688640, 'wr_req': 0,
                            'rd_req': 169, 'wr_bytes': 0}]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        # A missing domain yields an empty usage list, not an error.
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")

        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])


class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840.
        import nova.virt.libvirt.driver as libvirt_driver
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        jsonutils.to_primitive(drvr._conn, convert_instances=True)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_tpool_execute_calls_libvirt(self, mock_svc):
        # Every raw libvirt call must be dispatched through eventlet.tpool
        # so it cannot block the whole process.
        conn = fakelibvirt.virConnect()
        conn.is_expected = True

        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            fakelibvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        self.assertTrue(c.is_expected)


class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
    """Tests for libvirtDriver.volume_snapshot_create/delete."""

    def setUp(self):
        super(LibvirtVolumeSnapshotTestCase, self).setUp()

        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')

        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'

        # create domain info
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        # alternate domain info with network-backed snapshot chain
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/root.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/snap.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore type='network' index='2'>
                        <driver name='qemu' type='qcow2'/>
                        <source protocol='gluster' name='vol1/snap-b.img'>
                          <host name='server1' port='24007'/>
                        </source>
                        <backingStore/>
                      </backingStore>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """

        # XML with netdisk attached, and 1 snapshot taken
        self.dom_netdisk_xml_2 = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/snap.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/root.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore/>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """

        # Snapshot-create payload shared by the create tests.
        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}

        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

        # Delete payload: merge snap.img into its (absent) target.
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}

        self.delete_info_3 = {'type': 'qcow2',
                              'file_to_merge': None,
                              'merge_target_file': None}

        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}

        # Unknown snapshot type -- used to exercise the error path.
        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}

    def tearDown(self):
        super(LibvirtVolumeSnapshotTestCase, self).tearDown()

    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_and_instance')
    def test_volume_refresh_connection_info(self,
                                            mock_get_by_volume_and_instance,
                                            mock_refresh_connection_info):
        # _volume_refresh_connection_info looks up the BDM by volume id and
        # delegates to refresh_connection_info on it.
        instance = objects.Instance(**self.inst)
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': uuids.instance,
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        fake_bdm = objects.BlockDeviceMapping(self.c, **fake_bdm)
        mock_get_by_volume_and_instance.return_value = fake_bdm

        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        mock_get_by_volume_and_instance.assert_called_once_with(
            self.c, self.volume_uuid, instance.uuid)
        mock_refresh_connection_info.assert_called_once_with(
            self.c, instance, self.drvr._volume_api, self.drvr)

    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk."""
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '<domainsnapshot>\n' ' <disks>\n' ' <disk name="disk1_file" snapshot="external" type="file">\n' ' <source file="new-file"/>\n' ' </disk>\n' ' <disk name="vdb" snapshot="no"/>\n' ' </disks>\n' '</domainsnapshot>\n') # Older versions of libvirt may be missing these. fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, flags=snap_flags) self.mox.ReplayAll() guest = libvirt_guest.Guest(domain) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_libgfapi(self, quiesce=True): """Test snapshot creation with libgfapi network disk.""" self.flags(instance_name_template = 'instance-%s') self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='disk1_file'/> <target dev='vda' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk> <disk type='block'> <source protocol='gluster' name='gluster1/volume-1234'> <host name='127.3.4.5' port='24007'/> </source> <target dev='vdb' bus='virtio' serial='1234'/> </disk> </devices> </domain>""" instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 
'XMLDesc') self.mox.StubOutWithMock(domain, 'snapshotCreateXML') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '<domainsnapshot>\n' ' <disks>\n' ' <disk name="disk1_file" snapshot="external" type="file">\n' ' <source file="new-file"/>\n' ' </disk>\n' ' <disk name="vdb" snapshot="no"/>\n' ' </disks>\n' '</domainsnapshot>\n') # Older versions of libvirt may be missing these. fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, flags=snap_flags) self.mox.ReplayAll() guest = libvirt_guest.Guest(domain) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_noquiesce(self): self.test_volume_snapshot_create(quiesce=False) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertIsNone(self.drvr._can_quiesce(instance, image_meta)) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce_bad_hyp(self, ver): self.flags(virt_type='lxc', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, 
image_meta) @mock.patch.object(host.Host, 'has_min_version', return_value=False) def test_can_quiesce_bad_ver(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = {"properties": { "hw_qemu_guest_agent": "yes"}} self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, image_meta) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce_agent_not_enable(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict({}) self.assertRaises(exception.QemuGuestAgentNotEnabled, self.drvr._can_quiesce, instance, image_meta) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_volume_snapshot_create') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_volume_refresh_connection_info') def test_volume_snapshot_create_outer_success(self, mock_refresh, mock_snap_create, mock_loop): class FakeLoopingCall(object): def __init__(self, func): self.func = func def start(self, *a, **k): try: self.func() except loopingcall.LoopingCallDone: pass return self def wait(self): return None mock_loop.side_effect = FakeLoopingCall instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1) guest = libvirt_guest.Guest(domain) @mock.patch.object(self.drvr, '_volume_api') @mock.patch.object(self.drvr._host, 'get_guest') def _test(mock_get_guest, mock_vol_api): mock_get_guest.return_value = guest mock_vol_api.get_snapshot.return_value = {'status': 'available'} self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid, self.create_info) mock_get_guest.assert_called_once_with(instance) mock_snap_create.assert_called_once_with( self.c, instance, guest, self.volume_uuid, self.create_info['new_file']) mock_vol_api.update_snapshot_status.assert_called_once_with( self.c, 
self.create_info['snapshot_id'], 'creating') mock_vol_api.get_snapshot.assert_called_once_with( self.c, self.create_info['snapshot_id']) mock_refresh.assert_called_once_with( self.c, instance, self.volume_uuid) _test() def test_volume_snapshot_create_outer_failure(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, self.create_info['new_file']).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, self.create_info['snapshot_id'], 'error') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_create, self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_delete_1(self): """Deleting newest snapshot -- blockRebase.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) domain.blockRebase('vda', 'snap.img', 0, flags=0) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1000, 
'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_relative_1(self): """Deleting newest snapshot -- blockRebase using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_guest(instance).AndReturn(guest) domain.blockRebase('vda', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def _setup_block_rebase_domain_and_guest_mocks(self, dom_xml): mock_domain = mock.Mock(spec=fakelibvirt.virDomain) mock_domain.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(mock_domain) exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'virDomainBlockRebase() failed', error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID) mock_domain.blockRebase.side_effect = exc return mock_domain, guest @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', 
return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running(self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, we rebase the image with another image as backing file. """ mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks( self.dom_xml) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) mock_qemu_img_info.assert_called_once_with("snap.img") mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', 'snap.img', '-F', 'fake_fmt', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running_and_no_rebase_base( self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, the image is rebased onto no backing file (i.e. 
it will exist independently of any backing file) """ mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks(self.dom_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) self.assertEqual(0, mock_qemu_img_info.call_count) mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', '', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) def test_volume_snapshot_delete_when_dom_with_nw_disk_not_running(self): """Deleting newest snapshot of a network disk when the domain is not running should raise a NovaException. """ mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks( self.dom_netdisk_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): ex = self.assertRaises(exception.NovaException, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.assertIn('has not been fully tested', six.text_type(ex)) def test_volume_snapshot_delete_2(self): """Deleting older snapshot -- blockCommit.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') 
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)

        self.mox.ReplayAll()

        # Without the RELATIVE commit flag, deleting an older snapshot is
        # expected to be rejected.
        self.assertRaises(exception.Invalid,
                          self.drvr._volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_2)

        # Restore the flag popped at the start of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})

    def test_volume_snapshot_delete_relative_2(self):
        """Deleting older snapshot -- blockCommit using relative flag"""

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)

        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        # Two polls: job in progress, then complete.
        domain.blockJobInfo('vda', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a volume
        # with blockRebase. So base of the new image will be null.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)

        with test.nested(
            mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(domain, 'blockRebase'),
            # Job type 4 with cur == end means the rebase has finished
            # (see virDomainBlockJobType enum).
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={
                                  'type': 4,  # See virDomainBlockJobType enum
                                  'bandwidth': 0,
                                  'cur': 1000,
                                  'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_rebase, mock_job_info):

            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            # Base is None: the image ends up with no backing file.
            mock_rebase.assert_called_once_with('vda', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vda', flags=0)

    def test_volume_snapshot_delete_netdisk_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a network attached volume
        # with blockRebase. So base of the new image will be null.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2)
        guest = libvirt_guest.Guest(domain)

        with test.nested(
            mock.patch.object(domain, 'XMLDesc',
                              return_value=self.dom_netdisk_xml_2),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(domain, 'blockRebase'),
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={'type': 0,
                                            'bandwidth': 0,
                                            'cur': 1000,
                                            'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_rebase, mock_job_info):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            # The network disk is vdb; base None drops the backing chain.
            mock_rebase.assert_called_once_with('vdb', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vdb', flags=0)

    def test_volume_snapshot_delete_outer_success(self):
        # Outer API: inner delete succeeds -> snapshot status 'deleting'
        # and connection info refreshed.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)

        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')

        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_outer_failure(self):
        # Outer API: inner delete raises -> status 'error_deleting' and the
        # exception propagates.
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))

        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_invalid_type(self):
        # Unknown delete_info 'type' -> status 'error_deleting' + exception.
        instance = objects.Instance(**self.inst)

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)

    def test_volume_snapshot_delete_netdisk_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            # Always report the network-disk domain XML.
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)

        # Network disks are addressed by backing-chain index notation.
        domain.blockRebase('vdb', 'vdb[1]', 0, flags=0)
        # Two polls: job in progress, then complete.
        domain.blockJobInfo('vdb', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()

        # Restore the flag popped at the start of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})

    def test_volume_snapshot_delete_netdisk_relative_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            # Always report the network-disk domain XML.
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)

        domain.blockRebase('vdb', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

        domain.blockJobInfo('vdb', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_netdisk_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            # Always report the network-disk domain XML.
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)

        self.mox.ReplayAll()

        # Without the RELATIVE commit flag this must be rejected.
        self.assertRaises(exception.Invalid,
                          self.drvr._volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_netdisk)

        # Restore the flag popped at the start of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})

    def test_volume_snapshot_delete_netdisk_relative_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            # Always report the network-disk domain XML.
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)

        domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        domain.blockJobInfo('vdb', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id,
                                          self.delete_info_netdisk)

        self.mox.VerifyAll()


def _fake_convert_image(source, dest, in_format, out_format,
                        run_as_root=True):
    # Stand-in for images.convert_image: just register the destination in
    # the fake libvirt_utils filesystem.
    libvirt_driver.libvirt_utils.files[dest] = ''


class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixture for the instance-snapshot (image upload) tests."""

    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./',
                   group='libvirt')
        self.context = context.get_admin_context()

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
                self)
        self.mock_update_task_state = mock.Mock()

        test_instance = _create_test_instance()
        self.instance_ref = objects.Instance(**test_instance)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        """Check task-state transitions and the uploaded image metadata."""
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)])

        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')

        if expected_properties:
            for expected_key, expected_value in \
                    six.iteritems(expected_properties):
                self.assertEqual(expected_value, props[expected_key])

    def _create_image(self, extra_properties=None):
        """Register a 'creating' image with the fake image service."""
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)

        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}

        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta

    @mock.patch.object(host.Host, 'has_min_version')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve,
                  mock_version):
        """Run driver.snapshot and return the resulting image metadata."""
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)
        snapshot = self.image_service.show(self.context, image_id)
        return snapshot

    def _test_snapshot(self, disk_format, extra_properties=None):
        # create -> snapshot -> assert, in one call.
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)


class LibvirtSnapshotTests(_BaseSnapshotTests):
    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = \
            utils.get_system_metadata_from_image(
                {'disk_format': 'ami'})
        self._test_snapshot(disk_format='ami')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')

    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='ploop')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_ploop(self, mock_convert_image):
        self._test_snapshot(disk_format='ploop')

    def test_no_image_architecture(self):
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')

    def test_no_original_image(self):
        # Image id that does not exist in the fake image service.
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')

    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'

        extra_properties = {'architecture': 'fake_arch',
                            'key_a': 'value_a',
                            'key_b': 'value_b',
                            'os_type': 'linux'}

        self._test_snapshot(disk_format='qcow2',
                            extra_properties=extra_properties)

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone(self, mock_rbd, mock_driver):
        # Happy path: direct RBD snapshot via clone + flatten.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        with mock.patch.object(fake_libvirt_utils, 'find_disk',
                               return_value=('rbd://some/fake/rbd/image',
                                             'raw')):
            with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
                self._test_snapshot(disk_format='raw')
        rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
        rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd,
                                                  mock_driver):
        # parent_info failing means the image is not cloneable: the driver
        # must fall back to the generic (convert_image) snapshot path.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
            image_id='fake_id', reason='rbd testing'))

        with test.nested(
                mock.patch.object(libvirt_driver.imagebackend.images,
                                  'convert_image',
                                  side_effect=_fake_convert_image),
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
            self._test_snapshot(disk_format='raw')
            self.assertFalse(rbd.clone.called)

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver):
        # A Forbidden error from clone also falls back to the generic path;
        # the partially-created snapshot must be removed.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        rbd.clone = mock.Mock(side_effect=exception.Forbidden(
                image_id='fake_id', reason='rbd testing'))

        with test.nested(
                mock.patch.object(libvirt_driver.imagebackend.images,
                                  'convert_image',
                                  side_effect=_fake_convert_image),
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
            self._test_snapshot(disk_format='raw')
            # Ensure that the direct_snapshot attempt was cleaned up
            rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False,
                                               pool='b', force=True)

    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd,
                                                   mock_driver):
        # Failure after the clone (image-service update) must propagate and
        # still clean up the direct snapshot.
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        with test.nested(
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'),
                mock.patch.object(self.image_service, 'update',
                                  side_effect=test.TestingException)):
            self.assertRaises(test.TestingException, self._test_snapshot,
                              disk_format='raw')
        rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
        rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
        # Ensure that the direct_snapshot attempt was cleaned up
        rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True,
                                           pool='b', force=True)

    @mock.patch.object(imagebackend.Image, 'direct_snapshot')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, 'get_guest')
    def test_raw_with_rbd_clone_is_live_snapshot(self,
                                                 mock_get_guest,
                                                 mock_version,
                                                 mock_resolve,
                                                 mock_snapshot):
        # Direct snapshot counts as live: the guest must not be suspended.
        self.flags(disable_libvirt_livesnapshot=False, group='workarounds')
        self.flags(images_type='rbd', group='libvirt')
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest._domain = mock.Mock()
        mock_get_guest.return_value = mock_guest
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        recv_meta = self._create_image()
        with mock.patch.object(driver, "suspend") as mock_suspend:
            driver.snapshot(self.context, self.instance_ref, recv_meta['id'],
                            self.mock_update_task_state)
            self.assertFalse(mock_suspend.called)

    @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(fake_libvirt_utils, 'find_disk')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_failure_does_cold_snapshot(self,
                                                           mock_rbd,
                                                           mock_driver,
                                                           mock_get_guest,
                                                           mock_version,
                                                           mock_resolve,
                                                           mock_find_disk,
                                                           mock_convert):
        # When direct snapshot is unavailable the driver falls back to a
        # cold snapshot, which suspends the running guest.
        self.flags(disable_libvirt_livesnapshot=False, group='workarounds')
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
            image_id='fake_id', reason='rbd testing'))
        mock_find_disk.return_value = ('rbd://some/fake/rbd/image', 'raw')
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.get_power_state.return_value = power_state.RUNNING
        mock_guest._domain = mock.Mock()
        mock_get_guest.return_value = mock_guest
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        recv_meta = self._create_image()
        with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
            with mock.patch.object(driver, "suspend") as mock_suspend:
                driver.snapshot(self.context, self.instance_ref,
                                recv_meta['id'], self.mock_update_task_state)
                self.assertTrue(mock_suspend.called)


class LXCSnapshotTests(LibvirtSnapshotTests):
    """Repeat all of the Libvirt snapshot tests, but with LXC enabled"""
    def setUp(self):
        super(LXCSnapshotTests, self).setUp()
        self.flags(virt_type='lxc', group='libvirt')

    def test_raw_with_rbd_clone_failure_does_cold_snapshot(self):
        self.skipTest("managedSave is not supported with LXC")


class LVMSnapshotTests(_BaseSnapshotTests):
    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
    def _test_lvm_snapshot(self, disk_format, mock_volume_info,
                           mock_convert_image):
        self.flags(images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')

        self._test_snapshot(disk_format=disk_format)

        mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')])
        mock_convert_image.assert_called_once_with(
            '/dev/nova-vg/lv', mock.ANY, 'raw', disk_format,
            run_as_root=True)

    def test_raw(self):
        self._test_lvm_snapshot('raw')

    def test_qcow2(self):
        self.flags(snapshot_image_format='qcow2', group='libvirt')
        self._test_lvm_snapshot('qcow2')
[ { "content": "Here is the snippet:\n```python\n\"\"\"Dirac notation for states.\"\"\"\n\n\nfrom sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,\n Tuple)\nfrom sympy.printing.pretty.stringpict import prettyForm, stringPict\nfrom sympy.physics.quantum.qexpr import QExpr, d...
[ { "content": "Here is the snippet:\n<|memory_start|>```python\n\"\"\"Dirac notation for states.\"\"\"\n\n\nfrom sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,\n Tuple)\nfrom sympy.printing.pretty.stringpict import prettyForm, stringPict\nfrom sympy.physics.quantum.qexpr...
```python """Dirac notation for states.""" from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt, Tuple) from sympy.printing.pretty.stringpict import prettyForm, stringPict from sympy.physics.quantum.qexpr import QExpr, dispatch_method __all__ = [ 'KetBase', 'BraBase', 'StateBase', 'State', 'Ket', 'Bra', 'TimeDepState', 'TimeDepBra', 'TimeDepKet', 'Wavefunction' ] #----------------------------------------------------------------------------- # States, bras and kets. #----------------------------------------------------------------------------- # ASCII brackets _lbracket = "<" _rbracket = ">" _straight_bracket = "|" # Unicode brackets # MATHEMATICAL ANGLE BRACKETS _lbracket_ucode = u"\u27E8" _rbracket_ucode = u"\u27E9" # LIGHT VERTICAL BAR _straight_bracket_ucode = u"\u2758" # Other options for unicode printing of <, > and | for Dirac notation. # LEFT-POINTING ANGLE BRACKET # _lbracket = u"\u2329" # _rbracket = u"\u232A" # LEFT ANGLE BRACKET # _lbracket = u"\u3008" # _rbracket = u"\u3009" # VERTICAL LINE # _straight_bracket = u"\u007C" class StateBase(QExpr): """Abstract base class for general abstract states in quantum mechanics. All other state classes defined will need to inherit from this class. It carries the basic structure for all other states such as dual, _eval_adjoint and label. This is an abstract base class and you should not instantiate it directly, instead use State. """ @classmethod def _operators_to_state(self, ops, **options): """ Returns the eigenstate instance for the passed operators. This method should be overridden in subclasses. It will handle being passed either an Operator instance or set of Operator instances. It should return the corresponding state INSTANCE or simply raise a NotImplementedError. See cartesian.py for an example. """ raise NotImplementedError("Cannot map operators to states in this class. 
Method not implemented!") def _state_to_operators(self, op_classes, **options): """ Returns the operators which this state instance is an eigenstate of. This method should be overridden in subclasses. It will be called on state instances and be passed the operator classes that we wish to make into instances. The state instance will then transform the classes appropriately, or raise a NotImplementedError if it cannot return operator instances. See cartesian.py for examples, """ raise NotImplementedError( "Cannot map this state to operators. Method not implemented!") @property def operators(self): """Return the operator(s) that this state is an eigenstate of""" from operatorset import state_to_operators # import internally to avoid circular import errors return state_to_operators(self) def _enumerate_state(self, num_states, **options): raise NotImplementedError("Cannot enumerate this state!") def _represent_default_basis(self, **options): return self._represent(basis=self.operators) #------------------------------------------------------------------------- # Dagger/dual #------------------------------------------------------------------------- @property def dual(self): """Return the dual state of this one.""" return self.dual_class()._new_rawargs(self.hilbert_space, *self.args) @classmethod def dual_class(self): """Return the class used to construt the dual.""" raise NotImplementedError( 'dual_class must be implemented in a subclass' ) def _eval_adjoint(self): """Compute the dagger of this state using the dual.""" return self.dual #------------------------------------------------------------------------- # Printing #------------------------------------------------------------------------- def _pretty_brackets(self, height, use_unicode=True): # Return pretty printed brackets for the state # Ideally, this could be done by pform.parens but it does not support the angled < and > # Setup for unicode vs ascii if use_unicode: lbracket, rbracket = self.lbracket_ucode, 
self.rbracket_ucode slash, bslash, vert = u'\u2571', u'\u2572', u'\u2502' else: lbracket, rbracket = self.lbracket, self.rbracket slash, bslash, vert = '/', '\\', '|' # If height is 1, just return brackets if height == 1: return stringPict(lbracket), stringPict(rbracket) # Make height even height += (height % 2) brackets = [] for bracket in lbracket, rbracket: # Create left bracket if bracket in set([_lbracket, _lbracket_ucode]): bracket_args = [ ' ' * (height//2 - i - 1) + slash for i in range(height // 2)] bracket_args.extend( [ ' ' * i + bslash for i in range(height // 2)]) # Create right bracket elif bracket in set([_rbracket, _rbracket_ucode]): bracket_args = [ ' ' * i + bslash for i in range(height // 2)] bracket_args.extend([ ' ' * ( height//2 - i - 1) + slash for i in range(height // 2)]) # Create straight bracket elif bracket in set([_straight_bracket, _straight_bracket_ucode]): bracket_args = [vert for i in range(height)] else: raise ValueError(bracket) brackets.append( stringPict('\n'.join(bracket_args), baseline=height//2)) return brackets def _sympystr(self, printer, *args): contents = self._print_contents(printer, *args) return '%s%s%s' % (self.lbracket, contents, self.rbracket) def _pretty(self, printer, *args): from sympy.printing.pretty.stringpict import prettyForm # Get brackets pform = self._print_contents_pretty(printer, *args) lbracket, rbracket = self._pretty_brackets( pform.height(), printer._use_unicode) # Put together state pform = prettyForm(*pform.left(lbracket)) pform = prettyForm(*pform.right(rbracket)) return pform def _latex(self, printer, *args): contents = self._print_contents_latex(printer, *args) # The extra {} brackets are needed to get matplotlib's latex # rendered to render this properly. return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex) class KetBase(StateBase): """Base class for Kets. This class defines the dual property and the brackets for printing. 
This is an abstract base class and you should not instantiate it directly, instead use Ket. """ lbracket = _straight_bracket rbracket = _rbracket lbracket_ucode = _straight_bracket_ucode rbracket_ucode = _rbracket_ucode lbracket_latex = r'\left|' rbracket_latex = r'\right\rangle ' @classmethod def default_args(self): return ("psi",) @classmethod def dual_class(self): return BraBase def __mul__(self, other): """KetBase*other""" from sympy.physics.quantum.operator import OuterProduct if isinstance(other, BraBase): return OuterProduct(self, other) else: return Expr.__mul__(self, other) def __rmul__(self, other): """other*KetBase""" from sympy.physics.quantum.innerproduct import InnerProduct if isinstance(other, BraBase): return InnerProduct(other, self) else: return Expr.__rmul__(self, other) #------------------------------------------------------------------------- # _eval_* methods #------------------------------------------------------------------------- def _eval_innerproduct(self, bra, **hints): """Evaluate the inner product betweeen this ket and a bra. This is called to compute <bra|ket>, where the ket is ``self``. This method will dispatch to sub-methods having the format:: ``def _eval_innerproduct_BraClass(self, **hints):`` Subclasses should define these methods (one for each BraClass) to teach the ket how to take inner products with bras. """ return dispatch_method(self, '_eval_innerproduct', bra, **hints) def _apply_operator(self, op, **options): """Apply an Operator to this Ket. This method will dispatch to methods having the format:: ``def _apply_operator_OperatorName(op, **options):`` Subclasses should define these methods (one for each OperatorName) to teach the Ket how operators act on it. Parameters ========== op : Operator The Operator that is acting on the Ket. options : dict A dict of key/value pairs that control how the operator is applied to the Ket. 
""" return dispatch_method(self, '_apply_operator', op, **options) class BraBase(StateBase): """Base class for Bras. This class defines the dual property and the brackets for printing. This is an abstract base class and you should not instantiate it directly, instead use Bra. """ lbracket = _lbracket rbracket = _straight_bracket lbracket_ucode = _lbracket_ucode rbracket_ucode = _straight_bracket_ucode lbracket_latex = r'\left\langle ' rbracket_latex = r'\right|' @classmethod def _operators_to_state(self, ops, **options): state = self.dual_class().operators_to_state(ops, **options) return state.dual def _state_to_operators(self, op_classes, **options): return self.dual._state_to_operators(op_classes, **options) def _enumerate_state(self, num_states, **options): dual_states = self.dual._enumerate_state(num_states, **options) return map(lambda x: x.dual, dual_states) @classmethod def default_args(self): return self.dual_class().default_args() @classmethod def dual_class(self): return KetBase def __mul__(self, other): """BraBase*other""" from sympy.physics.quantum.innerproduct import InnerProduct if isinstance(other, KetBase): return InnerProduct(self, other) else: return Expr.__mul__(self, other) def __rmul__(self, other): """other*BraBase""" from sympy.physics.quantum.operator import OuterProduct if isinstance(other, KetBase): return OuterProduct(other, self) else: return Expr.__rmul__(self, other) def _represent(self, **options): """A default represent that uses the Ket's version.""" from sympy.physics.quantum.dagger import Dagger return Dagger(self.dual._represent(**options)) class State(StateBase): """General abstract quantum state used as a base class for Ket and Bra.""" pass class Ket(State, KetBase): """A general time-independent Ket in quantum mechanics. Inherits from State and KetBase. This class should be used as the base class for all physical, time-independent Kets in a system. 
This class and its subclasses will be the main classes that users will use for expressing Kets in Dirac notation [1]_. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time. Examples ======== Create a simple Ket and looking at its properties:: >>> from sympy.physics.quantum import Ket, Bra >>> from sympy import symbols, I >>> k = Ket('psi') >>> k |psi> >>> k.hilbert_space H >>> k.is_commutative False >>> k.label (psi,) Ket's know about their associated bra:: >>> k.dual <psi| >>> k.dual_class() <class 'sympy.physics.quantum.state.Bra'> Take a linear combination of two kets:: >>> k0 = Ket(0) >>> k1 = Ket(1) >>> 2*I*k0 - 4*k1 2*I*|0> - 4*|1> Compound labels are passed as tuples:: >>> n, m = symbols('n,m') >>> k = Ket(n,m) >>> k |nm> References ========== .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation """ @classmethod def dual_class(self): return Bra class Bra(State, BraBase): """A general time-independent Bra in quantum mechanics. Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This class and its subclasses will be the main classes that users will use for expressing Bras in Dirac notation. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time. 
Examples ======== Create a simple Bra and look at its properties:: >>> from sympy.physics.quantum import Ket, Bra >>> from sympy import symbols, I >>> b = Bra('psi') >>> b <psi| >>> b.hilbert_space H >>> b.is_commutative False Bra's know about their dual Ket's:: >>> b.dual |psi> >>> b.dual_class() <class 'sympy.physics.quantum.state.Ket'> Like Kets, Bras can have compound labels and be manipulated in a similar manner:: >>> n, m = symbols('n,m') >>> b = Bra(n,m) - I*Bra(m,n) >>> b -I*<mn| + <nm| Symbols in a Bra can be substituted using ``.subs``:: >>> b.subs(n,m) <mm| - I*<mm| References ========== .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation """ @classmethod def dual_class(self): return Ket #----------------------------------------------------------------------------- # Time dependent states, bras and kets. #----------------------------------------------------------------------------- class TimeDepState(StateBase): """Base class for a general time-dependent quantum state. This class is used as a base class for any time-dependent state. The main difference between this class and the time-independent state is that this class takes a second argument that is the time in addition to the usual label argument. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. 
""" #------------------------------------------------------------------------- # Initialization #------------------------------------------------------------------------- @classmethod def default_args(self): return ("psi", "t") #------------------------------------------------------------------------- # Properties #------------------------------------------------------------------------- @property def label(self): """The label of the state.""" return self.args[:-1] @property def time(self): """The time of the state.""" return self.args[-1] #------------------------------------------------------------------------- # Printing #------------------------------------------------------------------------- def _print_time(self, printer, *args): return printer._print(self.time, *args) _print_time_repr = _print_time _print_time_latex = _print_time def _print_time_pretty(self, printer, *args): pform = printer._print(self.time, *args) return pform def _print_contents(self, printer, *args): label = self._print_label(printer, *args) time = self._print_time(printer, *args) return '%s;%s' % (label, time) def _print_label_repr(self, printer, *args): label = self._print_sequence(self.label, ',', printer, *args) time = self._print_time_repr(printer, *args) return '%s,%s' % (label, time) def _print_contents_pretty(self, printer, *args): label = self._print_label_pretty(printer, *args) time = self._print_time_pretty(printer, *args) return printer._print_seq((label, time), delimiter=';') def _print_contents_latex(self, printer, *args): label = self._print_sequence( self.label, self._label_separator, printer, *args) time = self._print_time_latex(printer, *args) return '%s;%s' % (label, time) class TimeDepKet(TimeDepState, KetBase): """General time-dependent Ket in quantum mechanics. This inherits from ``TimeDepState`` and ``KetBase`` and is the main class that should be used for Kets that vary with time. Its dual is a ``TimeDepBra``. 
Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. Examples ======== Create a TimeDepKet and look at its attributes:: >>> from sympy.physics.quantum import TimeDepKet >>> k = TimeDepKet('psi', 't') >>> k |psi;t> >>> k.time t >>> k.label (psi,) >>> k.hilbert_space H TimeDepKets know about their dual bra:: >>> k.dual <psi;t| >>> k.dual_class() <class 'sympy.physics.quantum.state.TimeDepBra'> """ @classmethod def dual_class(self): return TimeDepBra class TimeDepBra(TimeDepState, BraBase): """General time-dependent Bra in quantum mechanics. This inherits from TimeDepState and BraBase and is the main class that should be used for Bras that vary with time. Its dual is a TimeDepBra. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. Examples ======== >>> from sympy.physics.quantum import TimeDepBra >>> from sympy import symbols, I >>> b = TimeDepBra('psi', 't') >>> b <psi;t| >>> b.time t >>> b.label (psi,) >>> b.hilbert_space H >>> b.dual |psi;t> """ @classmethod def dual_class(self): return TimeDepKet class Wavefunction(Function): """Class for representations in continuous bases This class takes an expression and coordinates in its constructor. It can be used to easily calculate normalizations and probabilities. Parameters ========== expr : Expr The expression representing the functional form of the w.f. 
coords : Symbol or tuple The coordinates to be integrated over, and their bounds Examples ======== Particle in a box, specifying bounds in the more primitive way of using Piecewise: >>> from sympy import Symbol, Piecewise, pi, N >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x = Symbol('x', real=True) >>> n = 1 >>> L = 1 >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True)) >>> f = Wavefunction(g, x) >>> f.norm 1 >>> f.is_normalized True >>> p = f.prob() >>> p(0) 0 >>> p(L) 0 >>> p(0.5) 2 >>> p(0.85*L) 2*sin(0.85*pi)**2 >>> N(p(0.85*L)) 0.412214747707527 Additionally, you can specify the bounds of the function and the indices in a more compact way: >>> from sympy import symbols, pi, diff >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm 1 >>> f(L+1) 0 >>> f(L-1) sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L) >>> f(-1) 0 >>> f(0.85) sqrt(2)*sin(0.85*pi*n/L)/sqrt(L) >>> f(0.85, n=1, L=1) sqrt(2)*sin(0.85*pi) >>> f.is_commutative False All arguments are automatically sympified, so you can define the variables as strings rather than symbols: >>> expr = x**2 >>> f = Wavefunction(expr, 'x') >>> type(f.variables[0]) <class 'sympy.core.symbol.Symbol'> Derivatives of Wavefunctions will return Wavefunctions: >>> diff(f, x) Wavefunction(2*x, x) """ #Any passed tuples for coordinates and their bounds need to be #converted to Tuples before Function's constructor is called, to #avoid errors from calling is_Float in the constructor def __new__(cls, *args, **options): new_args = [None for i in args] ct = 0 for arg in args: if isinstance(arg, tuple): new_args[ct] = Tuple(*arg) else: new_args[ct] = arg ct += 1 return super(Function, cls).__new__(cls, *new_args, **options) def __call__(self, *args, **options): var = 
self.variables if len(args) != len(var): raise NotImplementedError( "Incorrect number of arguments to function!") ct = 0 #If the passed value is outside the specified bounds, return 0 for v in var: lower, upper = self.limits[v] #Do the comparison to limits only if the passed symbol is actually #a symbol present in the limits; #Had problems with a comparison of x > L if isinstance(args[ct], Expr) and \ not (lower in args[ct].free_symbols or upper in args[ct].free_symbols): continue if args[ct] < lower or args[ct] > upper: return 0 ct += 1 expr = self.expr #Allows user to make a call like f(2, 4, m=1, n=1) for symbol in list(expr.free_symbols): if str(symbol) in options.keys(): val = options[str(symbol)] expr = expr.subs(symbol, val) return expr.subs(zip(var, args)) def _eval_derivative(self, symbol): expr = self.expr deriv = expr._eval_derivative(symbol) return Wavefunction(deriv, *self.args[1:]) def _eval_conjugate(self): return Wavefunction(conjugate(self.expr), *self.args[1:]) def _eval_transpose(self): return self @property def free_symbols(self): return self.expr.free_symbols @property def is_commutative(self): """ Override Function's is_commutative so that order is preserved in represented expressions """ return False @classmethod def eval(self, *args): return None @property def variables(self): """ Return the coordinates which the wavefunction depends on Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x,y = symbols('x,y') >>> f = Wavefunction(x*y, x, y) >>> f.variables (x, y) >>> g = Wavefunction(x*y, x) >>> g.variables (x,) """ var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]] return tuple(var) @property def limits(self): """ Return the limits of the coordinates which the w.f. depends on If no limits are specified, defaults to ``(-oo, oo)``. 
Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x, y = symbols('x, y') >>> f = Wavefunction(x**2, (x, 0, 1)) >>> f.limits {x: (0, 1)} >>> f = Wavefunction(x**2, x) >>> f.limits {x: (-oo, oo)} >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2)) >>> f.limits {x: (-oo, oo), y: (-1, 2)} """ limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo) for g in self._args[1:]] return dict(zip(self.variables, tuple(limits))) @property def expr(self): """ Return the expression which is the functional form of the Wavefunction Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x, y = symbols('x, y') >>> f = Wavefunction(x**2, x) >>> f.expr x**2 """ return self._args[0] @property def is_normalized(self): """ Returns true if the Wavefunction is properly normalized Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.is_normalized True """ return (self.norm == 1.0) @property @cacheit def norm(self): """ Return the normalization of the specified functional form. This function integrates over the coordinates of the Wavefunction, with the bounds specified. 
Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm 1 >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm sqrt(2)*sqrt(L)/2 """ exp = self.expr*conjugate(self.expr) var = self.variables limits = self.limits for v in var: curr_limits = limits[v] exp = integrate(exp, (v, curr_limits[0], curr_limits[1])) return sqrt(exp) def normalize(self): """ Return a normalized version of the Wavefunction Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', real=True) >>> n = symbols('n', integer=True) >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.normalize() Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L)) """ const = self.norm if const == oo: raise NotImplementedError("The function is not normalizable!") else: return Wavefunction((const)**(-1)*self.expr, *self.args[1:]) def prob(self): """ Return the absolute magnitude of the w.f., `|\psi(x)|^2` Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', real=True) >>> n = symbols('n', integer=True) >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.prob() Wavefunction(sin(pi*n*x/L)**2, x) """ return Wavefunction(self.expr*conjugate(self.expr), *self.variables) ```
[ { "content": "```python\nimport pytz\nfrom pytz import timezone\nfrom datetime import timedelta, datetime, date, time\nimport re\nfrom corehq.apps.casegroups.models import CommCareCaseGroup\nfrom corehq.apps.hqcase.dbaccessors import get_case_ids_in_domain\nfrom corehq.apps.reminders.dbaccessors import get_surv...
[ { "content": "<|memory_start|>```python\nimport pytz\nfrom pytz import timezone\nfrom datetime import timedelta, datetime, date, time\nimport re\nfrom corehq.apps.casegroups.models import CommCareCaseGroup\nfrom corehq.apps.hqcase.dbaccessors import get_case_ids_in_domain\nfrom corehq.apps.reminders.dbaccessors...
```python import pytz from pytz import timezone from datetime import timedelta, datetime, date, time import re from corehq.apps.casegroups.models import CommCareCaseGroup from corehq.apps.hqcase.dbaccessors import get_case_ids_in_domain from corehq.apps.reminders.dbaccessors import get_surveys_in_domain from dimagi.ext.couchdbkit import * from casexml.apps.case.models import CommCareCase from corehq.apps.sms.models import CommConnectCase from corehq.apps.users.cases import get_owner_id, get_wrapped_owner from corehq.apps.users.models import CouchUser from corehq.apps.groups.models import Group from dimagi.utils.parsing import string_to_datetime, json_format_datetime from dateutil.parser import parse from corehq.apps.reminders.util import get_form_name, enqueue_reminder_directly from couchdbkit.exceptions import ResourceConflict from couchdbkit.resource import ResourceNotFound from corehq.apps.sms.util import create_task, close_task, update_task from corehq.apps.smsforms.app import submit_unfinished_form from dimagi.utils.couch import LockableMixIn, CriticalSection from dimagi.utils.couch.cache.cache_core import get_redis_client from dimagi.utils.multithreading import process_fast from dimagi.utils.logging import notify_exception from random import randint from django.conf import settings from dimagi.utils.couch.database import iter_docs class IllegalModelStateException(Exception): pass METHOD_SMS = "sms" METHOD_SMS_CALLBACK = "callback" METHOD_SMS_SURVEY = "survey" METHOD_IVR_SURVEY = "ivr_survey" METHOD_EMAIL = "email" METHOD_STRUCTURED_SMS = "structured_sms" METHOD_CHOICES = [ METHOD_SMS, METHOD_SMS_CALLBACK, METHOD_SMS_SURVEY, METHOD_IVR_SURVEY, METHOD_EMAIL, ] # The Monday - Sunday constants are meant to match the result from # date.weekday() DAY_ANY = -1 DAY_MON = 0 DAY_TUE = 1 DAY_WED = 2 DAY_THU = 3 DAY_FRI = 4 DAY_SAT = 5 DAY_SUN = 6 DAY_OF_WEEK_CHOICES = [ DAY_ANY, DAY_MON, DAY_TUE, DAY_WED, DAY_THU, DAY_FRI, DAY_SAT, DAY_SUN, ] 
REPEAT_SCHEDULE_INDEFINITELY = -1 EVENT_AS_SCHEDULE = "SCHEDULE" EVENT_AS_OFFSET = "OFFSET" EVENT_INTERPRETATIONS = [EVENT_AS_SCHEDULE, EVENT_AS_OFFSET] UI_SIMPLE_FIXED = "SIMPLE_FIXED" UI_COMPLEX = "COMPLEX" UI_CHOICES = [UI_SIMPLE_FIXED, UI_COMPLEX] RECIPIENT_SENDER = "SENDER" RECIPIENT_USER = "USER" RECIPIENT_OWNER = "OWNER" RECIPIENT_CASE = "CASE" RECIPIENT_PARENT_CASE = "PARENT_CASE" RECIPIENT_ALL_SUBCASES = "ALL_SUBCASES" RECIPIENT_SUBCASE = "SUBCASE" RECIPIENT_SURVEY_SAMPLE = "SURVEY_SAMPLE" RECIPIENT_USER_GROUP = "USER_GROUP" RECIPIENT_CHOICES = [ RECIPIENT_USER, RECIPIENT_OWNER, RECIPIENT_CASE, RECIPIENT_SURVEY_SAMPLE, RECIPIENT_PARENT_CASE, RECIPIENT_SUBCASE, RECIPIENT_USER_GROUP, ] KEYWORD_RECIPIENT_CHOICES = [RECIPIENT_SENDER, RECIPIENT_OWNER, RECIPIENT_USER_GROUP] KEYWORD_ACTION_CHOICES = [METHOD_SMS, METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS] FIRE_TIME_DEFAULT = "DEFAULT" FIRE_TIME_CASE_PROPERTY = "CASE_PROPERTY" FIRE_TIME_RANDOM = "RANDOM" FIRE_TIME_CHOICES = [FIRE_TIME_DEFAULT, FIRE_TIME_CASE_PROPERTY, FIRE_TIME_RANDOM] MATCH_EXACT = "EXACT" MATCH_REGEX = "REGEX" MATCH_ANY_VALUE = "ANY_VALUE" MATCH_TYPE_CHOICES = [MATCH_EXACT, MATCH_REGEX, MATCH_ANY_VALUE] CASE_CRITERIA = "CASE_CRITERIA" ON_DATETIME = "ON_DATETIME" START_CONDITION_TYPES = [CASE_CRITERIA, ON_DATETIME] SURVEY_METHOD_LIST = ["SMS","CATI"] UI_FREQUENCY_ADVANCED = "ADVANCED" UI_FREQUENCY_CHOICES = [UI_FREQUENCY_ADVANCED] QUESTION_RETRY_CHOICES = [1, 2, 3, 4, 5] FORM_TYPE_ONE_BY_ONE = "ONE_BY_ONE" # Answer each question one at a time FORM_TYPE_ALL_AT_ONCE = "ALL_AT_ONCE" # Complete the entire form with just one sms using the delimiter to separate answers FORM_TYPE_CHOICES = [FORM_TYPE_ONE_BY_ONE, FORM_TYPE_ALL_AT_ONCE] REMINDER_TYPE_ONE_TIME = "ONE_TIME" REMINDER_TYPE_KEYWORD_INITIATED = "KEYWORD_INITIATED" REMINDER_TYPE_DEFAULT = "DEFAULT" REMINDER_TYPE_SURVEY_MANAGEMENT = "SURVEY_MANAGEMENT" REMINDER_TYPE_CHOICES = [REMINDER_TYPE_DEFAULT, REMINDER_TYPE_ONE_TIME, 
REMINDER_TYPE_KEYWORD_INITIATED, REMINDER_TYPE_SURVEY_MANAGEMENT] SEND_NOW = "NOW" SEND_LATER = "LATER" # This time is used when the case property used to specify the reminder time isn't a valid time # TODO: Decide whether to keep this or retire the reminder DEFAULT_REMINDER_TIME = time(12, 0) def is_true_value(val): return val == 'ok' or val == 'OK' def looks_like_timestamp(value): try: regex = re.compile("^\d\d\d\d-\d\d-\d\d.*$") return (regex.match(value) is not None) except Exception: return False def property_references_parent(case_property): return isinstance(case_property, basestring) and case_property.startswith("parent/") def get_case_property(case, case_property): """ case the case case_property the name of the case property (can be 'parent/property' to lookup on the parent, or 'property' to lookup on the case) """ if case_property is None or case is None: return None elif property_references_parent(case_property): parent_case = case.parent if parent_case is None: return None else: return parent_case.get_case_property(case_property[7:]) else: return case.get_case_property(case_property) def case_matches_criteria(case, match_type, case_property, value_to_match): result = False case_property_value = get_case_property(case, case_property) if match_type == MATCH_EXACT: result = (case_property_value == value_to_match) and (value_to_match is not None) elif match_type == MATCH_ANY_VALUE: result = case_property_value is not None elif match_type == MATCH_REGEX: try: regex = re.compile(value_to_match) result = regex.match(str(case_property_value)) is not None except Exception: result = False return result def get_events_scheduling_info(events): """ Return a list of events as dictionaries, only with information pertinent to scheduling changes. 
""" result = [] for e in events: result.append({ "day_num": e.day_num, "fire_time": e.fire_time, "fire_time_aux": e.fire_time_aux, "fire_time_type": e.fire_time_type, "time_window_length": e.time_window_length, "callback_timeout_intervals": e.callback_timeout_intervals, "form_unique_id": e.form_unique_id, }) return result class MessageVariable(object): def __init__(self, variable): self.variable = variable def __unicode__(self): return unicode(self.variable) @property def days_until(self): try: variable = string_to_datetime(self.variable) except Exception: return "(?)" else: # add 12 hours and then floor == round to the nearest day return (variable - datetime.utcnow() + timedelta(hours=12)).days def __getattr__(self, item): try: return super(MessageVariable, self).__getattribute__(item) except Exception: pass try: return MessageVariable(getattr(self.variable, item)) except Exception: pass try: return MessageVariable(self.variable[item]) except Exception: pass return "(?)" class Message(object): def __init__(self, template, **params): self.template = template self.params = {} for key, value in params.items(): self.params[key] = MessageVariable(value) def __unicode__(self): return self.template.format(**self.params) @classmethod def render(cls, template, **params): if isinstance(template, str): template = unicode(template, encoding='utf-8') return unicode(cls(template, **params)) class CaseReminderEvent(DocumentSchema): """ A CaseReminderEvent is the building block for representing reminder schedules in a CaseReminderHandler (see CaseReminderHandler.events). day_num See CaseReminderHandler, depends on event_interpretation. fire_time See CaseReminderHandler, depends on event_interpretation. fire_time_aux Usage depends on fire_time_type. fire_time_type FIRE_TIME_DEFAULT: the event will be scheduled at the time specified by fire_time. FIRE_TIME_CASE_PROPERTY: the event will be scheduled at the time specified by the case property named in fire_time_aux. 
FIRE_TIME_RANDOM: the event will be scheduled at a random minute on the interval that starts with fire_time and lasts for time_window_length minutes time_window_length Used in FIRE_TIME_RANDOM to define a time interval that starts at fire_time and lasts for this many minutes message The text to send along with language to send it, represented as a dictionary: {"en": "Hello, {user.full_name}, you're having issues."} callback_timeout_intervals For CaseReminderHandlers whose method is "callback", a list of timeout intervals (in minutes). The message is resent based on the number of entries in this list until the callback is received, or the number of timeouts is exhausted. form_unique_id For CaseReminderHandlers whose method is "survey", this the unique id of the form to play as a survey. """ day_num = IntegerProperty() fire_time = TimeProperty() fire_time_aux = StringProperty() fire_time_type = StringProperty(choices=FIRE_TIME_CHOICES, default=FIRE_TIME_DEFAULT) time_window_length = IntegerProperty() message = DictProperty() callback_timeout_intervals = ListProperty(IntegerProperty) form_unique_id = StringProperty() def run_rule(case_id, handler, schedule_changed, prev_definition): case = CommCareCase.get(case_id) try: handler.case_changed(case, schedule_changed=schedule_changed, prev_definition=prev_definition) except ResourceConflict: # Sometimes the reminder fires in the middle of reprocessing # the scheduling. handler.case_changed(case, schedule_changed=schedule_changed, prev_definition=prev_definition) try: client = get_redis_client() client.incr("reminder-rule-processing-current-%s" % handler._id) except: pass def retire_reminder(reminder_id): r = CaseReminder.get(reminder_id) r.retire() def get_case_ids(domain): """ Had to add this because this query kept intermittently raising "NoMoreData: Can't parse headers" exceptions. 
    """
    # Retry the flaky query up to max_tries times; re-raise on the last try.
    max_tries = 5
    for i in range(max_tries):
        try:
            return get_case_ids_in_domain(domain)
        except Exception:
            if i == (max_tries - 1):
                raise


class CaseReminderHandler(Document):
    """
    A CaseReminderHandler defines the rules and schedule which govern how messages
    should go out. The "start" and "until" attributes will spawn and deactivate a
    CaseReminder for a CommCareCase, respectively, when their conditions are reached.
    Below both are described in more detail:

    start   This defines when the reminder schedule kicks off.
            Examples:   start="edd"
                            - The reminder schedule kicks off for a CommCareCase on
                              the date defined by the CommCareCase's "edd" property.
                        start="form_started"
                            - The reminder schedule kicks off for a CommCareCase when
                              the CommCareCase's "form_started" property equals "ok".

    until   This defines when the reminders should stop being sent. Once this condition
            is reached, the CaseReminder is deactivated.
            Examples:   until="followup_1_complete"
                            - The reminders will stop being sent for a CommCareCase when
                              the CommCareCase's "followup_1_complete" property equals "ok".

    Once a CaseReminder is spawned (i.e., when the "start" condition is met for a
    CommCareCase), the intervals at which reminders are sent and the messages sent
    are defined by the "events" attribute on the CaseReminderHandler.

    One complete cycle through all events is considered to be an "iteration", and the
    attribute that defines the maximum number of iterations for this schedule is
    "max_iteration_count". Reminder messages will continue to be sent until the events
    cycle has occurred "max_iteration_count" times, or until the "until" condition is
    met, whichever comes first. To ignore the "max_iteration_count", it can be set to
    REPEAT_SCHEDULE_INDEFINITELY, in which case only the "until" condition stops the
    reminder messages.

    The events can either be interpreted as offsets from each other and from the original
    "start" condition, or as fixed schedule times from the original "start" condition:

    Example of "event_interpretation" == EVENT_AS_OFFSET:
        start                   = "form1_completed"
        start_offset            = 1
        events                  = [
            CaseReminderEvent(
                day_num     = 0
               ,fire_time   = time(hour=1)
               ,message     = {"en": "Form not yet completed."}
            )
        ]
        schedule_length         = 0
        event_interpretation    = EVENT_AS_OFFSET
        max_iteration_count     = REPEAT_SCHEDULE_INDEFINITELY
        until                   = "form2_completed"

    This CaseReminderHandler can be used to send an hourly message starting one day
    (start_offset=1) after "form1_completed", and will keep sending the message every
    hour until "form2_completed". So, if "form1_completed" is reached on January 1, 2012,
    at 9:46am, the reminders will begin being sent at January 2, 2012, at 10:46am and
    every hour subsequently until "form2_completed". Specifically, when
    "event_interpretation" is EVENT_AS_OFFSET:
        day_num         is interpreted to be a number of days after the last fire
        fire_time       is interpreted to be a number of hours, minutes, and seconds
                        after the last fire
        schedule_length is interpreted to be a number of days between the last event
                        and the beginning of a new iteration

    Example of "event_interpretation" == EVENT_AS_SCHEDULE:
        start                   = "regimen_started"
        start_offset            = 1
        events                  = [
            CaseReminderEvent(
                day_num     = 1
               ,fire_time   = time(11,00)
               ,message     = {"en": "Form not yet completed."}
            )
           ,CaseReminderEvent(
                day_num     = 4
               ,fire_time   = time(11,00)
               ,message     = {"en": "Form not yet completed."}
            )
        ]
        schedule_length         = 7
        event_interpretation    = EVENT_AS_SCHEDULE
        max_iteration_count     = 4
        until                   = "ignore_this_attribute"

    This CaseReminderHandler can be used to send reminders at 11:00am on days 2 and 5
    of a weekly schedule (schedule_length=7), for 4 weeks (max_iteration_count=4).
    "Day 1" of the weekly schedule is considered to be one day (start_offset=1) after
    "regimen_started". So, if "regimen_started" is reached on a Sunday, the days of the
    week will be Monday=1, Tuesday=2, etc., and the reminders will be sent on Tuesday
    and Friday of each week, for 4 weeks. Specifically, when "event_interpretation" is
    EVENT_AS_SCHEDULE:
        day_num         is interpreted to be a the number of days since the current
                        event cycle began
        fire_time       is interpreted to be the time of day to fire the reminder
        schedule_length is interpreted to be the length of the event cycle, in days

    Below is a description of the remaining attributes for a CaseReminderHandler:

    domain          The domain to which this CaseReminderHandler belongs. Only CommCareCases
                    belonging to this domain will be checked for the "start" and "until"
                    conditions.

    case_type       Only CommCareCases whose "type" attribute matches this attribute will be
                    checked for the "start" and "until" conditions.

    nickname        A simple name used to describe this CaseReminderHandler.

    default_lang    Default language to use in case no translation is found for the recipient's
                    language.

    method          Set to "sms" to send simple sms reminders at the proper intervals.
                    Set to "callback" to send sms reminders and to enable the checked of
                    "callback_timeout_intervals" on each event.

    ui_type         The type of UI to use for editing this CaseReminderHandler (see
                    UI_CHOICES)
    """
    domain = StringProperty()
    last_modified = DateTimeProperty()
    active = BooleanProperty(default=True)
    case_type = StringProperty()
    nickname = StringProperty()
    default_lang = StringProperty()
    method = StringProperty(choices=METHOD_CHOICES, default="sms")
    ui_type = StringProperty(choices=UI_CHOICES, default=UI_SIMPLE_FIXED)
    recipient = StringProperty(choices=RECIPIENT_CHOICES, default=RECIPIENT_USER)
    ui_frequency = StringProperty(choices=UI_FREQUENCY_CHOICES, default=UI_FREQUENCY_ADVANCED)   # This will be used to simplify the scheduling process in the ui
    sample_id = StringProperty()
    user_group_id = StringProperty()
    user_id = StringProperty()
    case_id = StringProperty()
    reminder_type = StringProperty(choices=REMINDER_TYPE_CHOICES, default=REMINDER_TYPE_DEFAULT)
    locked = BooleanProperty(default=False)

    # Only used when recipient is RECIPIENT_SUBCASE.
    # All subcases matching the given criteria will be the recipients.
    recipient_case_match_property = StringProperty()
    recipient_case_match_type = StringProperty(choices=MATCH_TYPE_CHOICES)
    recipient_case_match_value = StringProperty()

    # Only applies when method is "survey".
    # If this is True, on the last survey timeout, instead of resending the current question,
    # it will submit the form for the recipient with whatever is completed up to that point.
    submit_partial_forms = BooleanProperty(default=False)

    # Only applies when submit_partial_forms is True.
    # If this is True, partial form submissions will be allowed to create / update / close cases.
    # If this is False, partial form submissions will just submit the form without case create / update / close.
    include_case_side_effects = BooleanProperty(default=False)

    # Only applies for method = "ivr_survey" right now.
    # This is the maximum number of times that it will retry asking a question with an invalid response before hanging
    # up. This is meant to prevent long running calls.
    max_question_retries = IntegerProperty(choices=QUESTION_RETRY_CHOICES, default=QUESTION_RETRY_CHOICES[-1])

    survey_incentive = StringProperty()

    # start condition
    start_condition_type = StringProperty(choices=START_CONDITION_TYPES, default=CASE_CRITERIA)

    # used when start_condition_type == ON_DATETIME
    start_datetime = DateTimeProperty()

    # used when start_condition_type == CASE_CRITERIA
    start_property = StringProperty()
    start_value = StringProperty()
    start_date = StringProperty()
    start_offset = IntegerProperty()
    start_match_type = StringProperty(choices=MATCH_TYPE_CHOICES)
    start_day_of_week = IntegerProperty(choices=DAY_OF_WEEK_CHOICES, default=DAY_ANY)

    # reminder schedule
    events = SchemaListProperty(CaseReminderEvent)
    schedule_length = IntegerProperty()
    event_interpretation = StringProperty(choices=EVENT_INTERPRETATIONS, default=EVENT_AS_OFFSET)
    max_iteration_count = IntegerProperty()

    # stop condition
    until = StringProperty()

    # If present, references an entry in settings.ALLOWED_CUSTOM_CONTENT_HANDLERS, which maps to a function
    # that should be called to retrieve the sms content to send in an sms reminder.
    # The signature of a custom content handler should be function(reminder, handler, recipient)
    custom_content_handler = StringProperty()

    # If a subcase triggers an SMS survey, but we're sending it to the parent case,
    # we sometimes want the subcase to be the one on which we execute case actions
    # during form submission. This option will allow for that.
    # Note that this option only makes a difference if a case is filling out the SMS survey,
    # and if a case other than that case triggered the reminder.
    force_surveys_to_use_triggered_case = BooleanProperty(default=False)

    @property
    def uses_parent_case_property(self):
        """True if any part of this definition references a 'parent/...'
        case property (event fire times, recipient match, start, or until).
        """
        events_use_parent_case_property = False
        for event in self.events:
            if event.fire_time_type == FIRE_TIME_CASE_PROPERTY and property_references_parent(event.fire_time_aux):
                events_use_parent_case_property = True
                break
        return (
            events_use_parent_case_property or
            property_references_parent(self.recipient_case_match_property) or
            property_references_parent(self.start_property) or
            property_references_parent(self.start_date) or
            property_references_parent(self.until)
        )

    @property
    def uses_time_case_property(self):
        """True if any event draws its fire time from a case property."""
        for event in self.events:
            if event.fire_time_type == FIRE_TIME_CASE_PROPERTY:
                return True
        return False

    @classmethod
    def get_now(cls):
        """Current UTC time; tests may override by setting cls.now."""
        try:
            # for testing purposes only!
            return getattr(cls, 'now')
        except Exception:
            return datetime.utcnow()

    def schedule_has_changed(self, old_definition):
        """
        Returns True if the scheduling information in self is different from
        the scheduling information in old_definition.

        old_definition - the CaseReminderHandler to compare to
        """
        return (
            get_events_scheduling_info(old_definition.events) != get_events_scheduling_info(self.events) or
            old_definition.start_offset != self.start_offset or
            old_definition.schedule_length != self.schedule_length or
            old_definition.max_iteration_count != self.max_iteration_count
        )

    def get_reminder(self, case):
        """Return the single CaseReminder spawned by this handler for the
        given case, or None if there isn't one.
        """
        domain = self.domain
        handler_id = self._id
        case_id = case._id

        return CaseReminder.view('reminders/by_domain_handler_case',
            key=[domain, handler_id, case_id],
            include_docs=True,
        ).one()

    def get_reminders(self, ids_only=False):
        """Return all CaseReminders spawned by this handler (documents, or
        just their ids when ids_only is True).
        """
        domain = self.domain
        handler_id = self._id
        include_docs = not ids_only
        result = CaseReminder.view('reminders/by_domain_handler_case',
            startkey=[domain, handler_id],
            endkey=[domain, handler_id, {}],
            include_docs=include_docs,
        ).all()
        if ids_only:
            return [entry["id"] for entry in result]
        else:
            return result

    def get_day_of_week_offset(self, dt, day_of_week):
        """Number of days (0-6) to add to dt to land on the given weekday
        (Monday == 0, matching datetime.weekday()).
        """
        offset = 0
        while dt.weekday() != day_of_week:
            offset += 1
            dt = dt + timedelta(days=1)
        return offset

    # For use with event_interpretation = EVENT_AS_SCHEDULE
    def get_current_reminder_event_timestamp(self, reminder, recipient, case):
        """Compute the UTC fire timestamp for the reminder's current event,
        honoring the event's fire_time_type and the handler's
        start_day_of_week, in the recipient's time zone.
        """
        event = self.events[reminder.current_event_sequence_num]

        # Determine the local fire time for the event.
        additional_minute_offset = 0
        if event.fire_time_type == FIRE_TIME_DEFAULT:
            fire_time = event.fire_time
        elif event.fire_time_type == FIRE_TIME_CASE_PROPERTY:
            fire_time = get_case_property(case, event.fire_time_aux)
            try:
                fire_time = parse(fire_time).time()
            except Exception:
                # Unparseable case property: fall back to the default time.
                fire_time = DEFAULT_REMINDER_TIME
        elif event.fire_time_type == FIRE_TIME_RANDOM:
            # Random minute within the window; the window start (fire_time)
            # is folded into the minute offset, so the base time is midnight.
            additional_minute_offset = randint(0, event.time_window_length - 1) + (event.fire_time.hour * 60) + event.fire_time.minute
            fire_time = time(0, 0)
        else:
            fire_time = DEFAULT_REMINDER_TIME

        # Determine the local fire date from the iteration number and day_num.
        day_offset = self.start_offset + (self.schedule_length * (reminder.schedule_iteration_num - 1)) + event.day_num
        start_date = reminder.start_date + timedelta(days=day_offset)

        # Optionally push forward to the configured day of the week.
        day_of_week_offset = 0
        if self.start_day_of_week != DAY_ANY:
            day_of_week_offset = self.get_day_of_week_offset(start_date, self.start_day_of_week)

        timestamp = (datetime.combine(start_date, fire_time) +
            timedelta(days=day_of_week_offset) +
            timedelta(minutes=additional_minute_offset))
        # Convert from the recipient's local time to UTC.
        return CaseReminderHandler.timestamp_to_utc(recipient, timestamp)

    def spawn_reminder(self, case, now, recipient=None):
        """
        Creates a CaseReminder.

        case    The CommCareCase for which to create the CaseReminder.
        now     The date and time to kick off the CaseReminder. This is the
                date and time from which all offsets are calculated.

        return  The CaseReminder
        """
        # Resolve the recipient from the handler definition if not given.
        if recipient is None:
            if self.recipient == RECIPIENT_USER:
                recipient = CouchUser.get_by_user_id(case.user_id)
            elif self.recipient == RECIPIENT_CASE:
                recipient = CommConnectCase.get(case._id)
            elif self.recipient == RECIPIENT_PARENT_CASE:
                if case is not None and case.parent is not None:
                    recipient = CommConnectCase.wrap_as_commconnect_case(case.parent)
        local_now = CaseReminderHandler.utc_to_local(recipient, now)

        case_id = case._id if case is not None else None
        user_id = recipient._id if self.recipient == RECIPIENT_USER and recipient is not None else None
        sample_id = recipient._id if self.recipient == RECIPIENT_SURVEY_SAMPLE else None

        reminder = CaseReminder(
            domain=self.domain,
            case_id=case_id,
            handler_id=self._id,
            user_id=user_id,
            method=self.method,
            active=True,
            # A "now" of exactly midnight is treated as a date-only start and
            # kept as-is; otherwise the start date is the recipient-local date.
            start_date=date(now.year, now.month, now.day) if (now.hour == 0 and now.minute == 0 and now.second == 0 and now.microsecond == 0) else date(local_now.year,local_now.month,local_now.day),
            schedule_iteration_num=1,
            current_event_sequence_num=0,
            callback_try_count=0,
            skip_remaining_timeouts=False,
            sample_id=sample_id,
            xforms_session_ids=[],
        )
        # Set the first fire time appropriately
        if self.event_interpretation == EVENT_AS_OFFSET:
            # EVENT_AS_OFFSET
            day_offset = self.start_offset + self.events[0].day_num
            time_offset = self.events[0].fire_time
            reminder.next_fire = now + timedelta(days=day_offset, hours=time_offset.hour, minutes=time_offset.minute, seconds=time_offset.second)
        else:
            # EVENT_AS_SCHEDULE
            reminder.next_fire = self.get_current_reminder_event_timestamp(reminder, recipient, case)
        return reminder

    @classmethod
    def utc_to_local(cls, contact, timestamp):
        """
        Converts the given naive datetime from UTC to the contact's time zone.

        contact     The contact whose time zone to use (must be an instance of
                    CommCareMobileContactMixin).
        timestamp   The naive datetime.

        return      The converted timestamp, as a naive datetime.
        """
        try:
            time_zone = timezone(str(contact.get_time_zone()))
            utc_datetime = pytz.utc.localize(timestamp)
            local_datetime = utc_datetime.astimezone(time_zone)
            naive_local_datetime = local_datetime.replace(tzinfo=None)
            return naive_local_datetime
        except Exception:
            # Missing/invalid contact or time zone: fall back to UTC as-is.
            return timestamp

    @classmethod
    def timestamp_to_utc(cls, contact, timestamp):
        """
        Converts the given naive datetime from the contact's time zone to UTC.

        contact     The contact whose time zone to use (must be an instance of
                    CommCareMobileContactMixin).
        timestamp   The naive datetime.

        return      The converted timestamp, as a naive datetime.
        """
        try:
            time_zone = timezone(str(contact.get_time_zone()))
            local_datetime = time_zone.localize(timestamp)
            utc_datetime = local_datetime.astimezone(pytz.utc)
            naive_utc_datetime = utc_datetime.replace(tzinfo=None)
            return naive_utc_datetime
        except Exception:
            # Missing/invalid contact or time zone: treat input as UTC.
            return timestamp

    def move_to_next_event(self, reminder):
        """
        Moves the given CaseReminder to the next event specified by its
        CaseReminderHandler. If the CaseReminder is on the last event in the
        cycle, it moves to the first event in the cycle.

        If the CaseReminderHandler's max_iteration_count is not
        REPEAT_SCHEDULE_INDEFINITELY and the CaseReminder is on the last
        event in the event cycle, the CaseReminder is also deactivated.

        reminder    The CaseReminder to move to the next event.
        return      void
        """
        # Advance the event pointer and clear all per-event state.
        reminder.current_event_sequence_num += 1
        reminder.callback_try_count = 0
        reminder.skip_remaining_timeouts = False
        reminder.xforms_session_ids = []
        reminder.event_initiation_timestamp = None
        if reminder.current_event_sequence_num >= len(self.events):
            # Wrapped past the last event: start a new iteration of the cycle.
            reminder.current_event_sequence_num = 0
            reminder.schedule_iteration_num += 1

    def set_next_fire(self, reminder, now):
        """
        Sets reminder.next_fire to the next allowable date after now by
        continuously moving the given CaseReminder to the next event (using
        move_to_next_event() above) and setting the CaseReminder's next_fire
        attribute accordingly until the next_fire > the now parameter. This is
        done to skip reminders that were never sent (such as when reminders
        are deactivated for a while), instead of sending one reminder every
        minute until they're all made up for.

        reminder    The CaseReminder whose next_fire to set.
        now         The date and time after which reminder.next_fire must be
                    before returning.

        return      void
        """
        case = reminder.case
        recipient = reminder.recipient
        iteration = 0
        reminder.error_retry_count = 0
        # Reset next_fire to its last scheduled fire time in case there were any error retries
        if reminder.last_scheduled_fire_time is not None:
            reminder.next_fire = reminder.last_scheduled_fire_time
        while now >= reminder.next_fire and reminder.active:
            iteration += 1
            # If it is a callback reminder, check the callback_timeout_intervals
            if (self.method in [METHOD_SMS_CALLBACK, METHOD_SMS_SURVEY, METHOD_IVR_SURVEY] and len(reminder.current_event.callback_timeout_intervals) > 0):
                if reminder.skip_remaining_timeouts or reminder.callback_try_count >= len(reminder.current_event.callback_timeout_intervals):
                    if self.method == METHOD_SMS_SURVEY and self.submit_partial_forms and iteration > 1:
                        # This is to make sure we submit the unfinished forms even when fast-forwarding to the next event after system downtime
                        for session_id in reminder.xforms_session_ids:
                            submit_unfinished_form(session_id, self.include_case_side_effects)
                else:
                    # Still within the timeout retries: schedule the next
                    # timeout resend rather than moving to the next event.
                    reminder.next_fire = reminder.next_fire + timedelta(minutes = reminder.current_event.callback_timeout_intervals[reminder.callback_try_count])
                    reminder.callback_try_count += 1
                    continue

            # Move to the next event in the cycle
            self.move_to_next_event(reminder)

            # Set the next fire time
            if self.event_interpretation == EVENT_AS_OFFSET:
                # EVENT_AS_OFFSET
                next_event = reminder.current_event
                day_offset = next_event.day_num
                if reminder.current_event_sequence_num == 0:
                    # Wrapped to a new iteration: add the inter-cycle gap.
                    day_offset += self.schedule_length
                time_offset = next_event.fire_time
                reminder.next_fire += timedelta(days=day_offset, hours=time_offset.hour, minutes=time_offset.minute, seconds=time_offset.second)
            else:
                # EVENT_AS_SCHEDULE
                reminder.next_fire = self.get_current_reminder_event_timestamp(reminder, recipient, case)

            # Set whether or not the reminder should still be active
            reminder.active = self.get_active(reminder, reminder.next_fire, case)

        # Preserve the current next fire time since next_fire can be manipulated for error retries
        reminder.last_scheduled_fire_time = reminder.next_fire

    def recalculate_schedule(self, reminder, prev_definition=None):
        """
        Recalculates which iteration / event number a schedule-based reminder
        should be on. Only meant to be called on schedule-based reminders.
        """
        # If we're mid-timeout on a survey event under the old definition,
        # remember the old session ids so they can be preserved or submitted.
        if reminder.callback_try_count > 0 and prev_definition is not None and len(prev_definition.events) > reminder.current_event_sequence_num:
            preserve_current_session_ids = True
            old_form_unique_id = prev_definition.events[reminder.current_event_sequence_num].form_unique_id
            old_xforms_session_ids = reminder.xforms_session_ids
        else:
            preserve_current_session_ids = False

        # Reset all scheduling state back to the start of iteration 1, then
        # fast-forward to the first event that is not in the past.
        case = reminder.case
        reminder.last_fired = None
        reminder.error_retry_count = 0
        reminder.event_initiation_timestamp = None
        reminder.active = True
        reminder.schedule_iteration_num = 1
        reminder.current_event_sequence_num = 0
        reminder.callback_try_count = 0
        reminder.skip_remaining_timeouts = False
        reminder.last_scheduled_fire_time = None
        reminder.xforms_session_ids = []
        reminder.next_fire = self.get_current_reminder_event_timestamp(reminder, reminder.recipient, case)
        reminder.active = self.get_active(reminder, reminder.next_fire, case)
        self.set_next_fire(reminder, self.get_now())

        if preserve_current_session_ids:
            if reminder.callback_try_count > 0 and self.events[reminder.current_event_sequence_num].form_unique_id == old_form_unique_id and self.method == METHOD_SMS_SURVEY:
                # Same survey form, still on a timeout: keep the old sessions.
                reminder.xforms_session_ids = old_xforms_session_ids
            elif prev_definition is not None and prev_definition.submit_partial_forms:
                # Sessions can't carry over: submit their partial forms.
                for session_id in old_xforms_session_ids:
                    submit_unfinished_form(session_id, prev_definition.include_case_side_effects)

    def get_active(self, reminder, now, case):
        """Return whether the reminder should still be active: the schedule
        hasn't exhausted max_iteration_count and (when a case is involved)
        the "until" condition hasn't been reached as of now.
        """
        schedule_not_finished = not (self.max_iteration_count != REPEAT_SCHEDULE_INDEFINITELY and reminder.schedule_iteration_num > self.max_iteration_count)
        if case is not None:
            until_not_reached = (not self.condition_reached(case, self.until, now))
            return until_not_reached and schedule_not_finished
        else:
            return schedule_not_finished

    def should_fire(self, reminder, now):
        """True when the reminder's next fire time has passed."""
        return now > reminder.next_fire

    def fire(self, reminder):
        """
        Sends the message associated with the given CaseReminder's current
        event.

        reminder    The CaseReminder which to fire.

        return      True on success, False on failure
        """
        # Prevent circular import
        from .event_handlers import EVENT_HANDLER_MAP

        if self.deleted():
            reminder.retire()
            return False

        # Retrieve the list of individual recipients
        recipient = reminder.recipient
        if isinstance(recipient, list) and len(recipient) > 0:
            recipients = recipient
        elif isinstance(recipient, CouchUser) or isinstance(recipient, CommCareCase):
            recipients = [recipient]
        elif isinstance(recipient, Group):
            recipients = recipient.get_users(is_active=True, only_commcare=False)
        elif isinstance(recipient, CommCareCaseGroup):
            recipients = [CommConnectCase.get(case_id) for case_id in recipient.cases]
        else:
            from corehq.apps.reminders.event_handlers import raise_error, ERROR_NO_RECIPIENTS
            raise_error(reminder, ERROR_NO_RECIPIENTS)
            return False

        # Retrieve the corresponding verified number entries for all individual recipients
        verified_numbers = {}
        for r in recipients:
            if hasattr(r, "get_verified_numbers"):
                contact_verified_numbers = r.get_verified_numbers(False)
                if len(contact_verified_numbers) > 0:
                    # Multiple verified numbers: deterministically pick the
                    # first one in sorted (phone number) order.
                    verified_number = sorted(contact_verified_numbers.iteritems())[0][1]
                else:
                    verified_number = None
            else:
                verified_number = None
            verified_numbers[r.get_id] = verified_number

        # Set the event initiation timestamp if we're not on any timeouts
        if reminder.callback_try_count == 0:
            reminder.event_initiation_timestamp = self.get_now()

        # Call the appropriate event handler
        event_handler = EVENT_HANDLER_MAP.get(self.method)
        last_fired = self.get_now() # Store the timestamp right before firing to ensure continuity in the callback lookups
        result = event_handler(reminder, self, recipients, verified_numbers)
        reminder.last_fired = last_fired
        return result

    @classmethod
    def condition_reached(cls, case, case_property, now):
        """
        Checks to see if the condition specified by case_property on case has
        been reached.

        If case[case_property] is a timestamp and now is later than it, then
        the condition is reached.
        If case[case_property] equals "ok", then the condition is reached.

        case            The CommCareCase to check.
        case_property   The property on CommCareCase to check.
        now             The timestamp to use when comparing, if
                        case.case_property is a timestamp.

        return  True if the condition is reached, False if not.
        """
        condition = get_case_property(case, case_property)

        # Coerce the property to a datetime where possible.
        if isinstance(condition, datetime):
            pass
        elif isinstance(condition, date):
            condition = datetime.combine(condition, time(0,0))
        elif looks_like_timestamp(condition):
            try:
                condition = parse(condition)
            except Exception:
                pass

        # Normalize tz-aware datetimes to naive UTC for comparison with now.
        if isinstance(condition, datetime) and getattr(condition, "tzinfo") is not None:
            condition = condition.astimezone(pytz.utc)
            condition = condition.replace(tzinfo=None)

        if (isinstance(condition, datetime) and now > condition) or is_true_value(condition):
            return True
        else:
            return False

    def case_changed(self, case, now=None, schedule_changed=False, prev_definition=None):
        """Serialize per-(handler, case) processing behind a critical
        section, then delegate to _case_changed().
        """
        key = "rule-update-definition-%s-case-%s" % (self._id, case._id)
        with CriticalSection([key]):
            self._case_changed(case, now, schedule_changed, prev_definition)

    def _case_changed(self, case, now, schedule_changed, prev_definition):
        """
        This method is used to manage updates to CaseReminderHandler's whose
        start_condition_type == CASE_CRITERIA.

        This method is also called every time a CommCareCase is saved and
        matches this CaseReminderHandler's domain and case_type. It's used
        to check for the "start" and "until" conditions in order to spawn or
        deactivate a CaseReminder for the CommCareCase.

        case    The case that is being updated.
        now     The current date and time to use; if not specified,
                datetime.utcnow() is used.

        return  void
        """
        now = now or self.get_now()
        reminder = self.get_reminder(case)

        # Resolve the case's owning user (skipped when the case owns itself).
        if case and case.user_id and (case.user_id != case._id):
            try:
                user = CouchUser.get_by_user_id(case.user_id)
            except KeyError:
                user = None
        else:
            user = None

        if (case.closed or case.type != self.case_type or
            case.doc_type.endswith("-Deleted") or self.deleted() or
            (self.recipient == RECIPIENT_USER and not user)):
            # Case or handler is no longer eligible: retire any reminder.
            if reminder:
                reminder.retire()
        else:
            # Retrieve the start date; parse string values, else fall back.
            start_condition_reached = case_matches_criteria(case, self.start_match_type, self.start_property, self.start_value)
            start_date = get_case_property(case, self.start_date)
            if (not isinstance(start_date, date)) and not (isinstance(start_date, datetime)):
                try:
                    start_date = parse(start_date)
                except Exception:
                    start_date = None

            if isinstance(start_date, datetime):
                start_condition_datetime = start_date
                start = start_date
            elif isinstance(start_date, date):
                start_condition_datetime = datetime(start_date.year, start_date.month, start_date.day, 0, 0, 0)
                start = start_condition_datetime
            else:
                # No usable start date on the case: start from now.
                start_condition_datetime = None
                start = now

            # Retire the reminder if the start condition is no longer valid
            if reminder is not None:
                if not start_condition_reached:
                    # The start condition is no longer valid, so retire the reminder
                    reminder.retire()
                    reminder = None
                elif reminder.start_condition_datetime != start_condition_datetime:
                    # The start date has changed, so retire the reminder and it will be spawned again in the next block
                    reminder.retire()
                    reminder = None

            # Spawn a reminder if need be
            just_spawned = False
            if reminder is None:
                if start_condition_reached:
                    reminder = self.spawn_reminder(case, start)
                    reminder.start_condition_datetime = start_condition_datetime
                    self.set_next_fire(reminder, now) # This will fast-forward to the next event that does not occur in the past
                    just_spawned = True

            # Check to see if the reminder should still be active
            if reminder is not None:
                if schedule_changed and self.event_interpretation == EVENT_AS_SCHEDULE and not just_spawned:
                    self.recalculate_schedule(reminder, prev_definition)
                else:
                    active = self.get_active(reminder, reminder.next_fire, case)
                    if active and not reminder.active:
                        reminder.active = True
                        self.set_next_fire(reminder, now) # This will fast-forward to the next event that does not occur in the past
                    else:
                        reminder.active = active
                # The reminder is only active if the handler itself is active.
                reminder.active = self.active and reminder.active
                reminder.save()

    def datetime_definition_changed(self, send_immediately=False):
        """
        This method is used to manage updates to CaseReminderHandler's whose
        start_condition_type == ON_DATETIME.

        Set send_immediately to True to send the first event right away,
        regardless of whether it may occur in the past.
        """
        # NOTE(review): .one() over a startkey/endkey range assumes at most
        # one reminder exists per handler here — confirm with view semantics.
        reminder = CaseReminder.view('reminders/by_domain_handler_case',
            startkey=[self.domain, self._id],
            endkey=[self.domain, self._id, {}],
            include_docs=True
        ).one()

        now = self.get_now()

        # Resolve the recipient from the handler definition.
        if self.recipient == RECIPIENT_SURVEY_SAMPLE:
            recipient = CommCareCaseGroup.get(self.sample_id)
        elif self.recipient == RECIPIENT_USER_GROUP:
            recipient = Group.get(self.user_group_id)
        elif self.recipient == RECIPIENT_USER:
            recipient = CouchUser.get_by_user_id(self.user_id)
        elif self.recipient == RECIPIENT_CASE:
            recipient = CommCareCase.get(self.case_id)
        else:
            recipient = None

        # Retire a stale reminder (start datetime changed or handler inactive).
        if reminder is not None and (reminder.start_condition_datetime != self.start_datetime or not self.active):
            reminder.retire()
            reminder = None

        if reminder is None and self.active:
            if self.recipient == RECIPIENT_CASE:
                case = recipient
            elif self.case_id is not None:
                case = CommCareCase.get(self.case_id)
            else:
                case = None
            reminder = self.spawn_reminder(case, self.start_datetime, recipient)
            reminder.start_condition_datetime = self.start_datetime
            if settings.REMINDERS_QUEUE_ENABLED:
                reminder.save()
                if send_immediately:
                    enqueue_reminder_directly(reminder)
            else:
                sent = False
                if send_immediately:
                    try:
                        sent = self.fire(reminder)
                    except Exception:
                        # An exception could happen here, for example, if
                        # touchforms is down.
So just pass, and let the reminder # be saved below so that the framework will pick it up # and try again. notify_exception(None, message="Error sending immediately for handler %s" % self._id) if sent or not send_immediately: self.set_next_fire(reminder, now) reminder.save() def check_state(self): """ Double-checks the model for any inconsistencies and raises an IllegalModelStateException if any exist. """ def check_attr(name, obj=self): # don't allow None or empty string, but allow 0 if getattr(obj, name) in [None, ""]: raise IllegalModelStateException("%s is required" % name) if self.start_condition_type == CASE_CRITERIA: check_attr("case_type") check_attr("start_property") check_attr("start_match_type") if self.start_match_type != MATCH_ANY_VALUE: check_attr("start_value") if self.start_condition_type == ON_DATETIME: check_attr("start_datetime") if self.method == METHOD_SMS: check_attr("default_lang") check_attr("schedule_length") check_attr("max_iteration_count") check_attr("start_offset") if len(self.events) == 0: raise IllegalModelStateException("len(events) must be > 0") last_day = 0 for event in self.events: check_attr("day_num", obj=event) if event.day_num < 0: raise IllegalModelStateException("event.day_num must be " "non-negative") if event.fire_time_type in [FIRE_TIME_DEFAULT, FIRE_TIME_RANDOM]: check_attr("fire_time", obj=event) if event.fire_time_type == FIRE_TIME_RANDOM: check_attr("time_window_length", obj=event) if event.fire_time_type == FIRE_TIME_CASE_PROPERTY: check_attr("fire_time_aux", obj=event) if self.method == METHOD_SMS and not self.custom_content_handler: if not isinstance(event.message, dict): raise IllegalModelStateException("event.message expected " "to be a dictionary") if self.default_lang not in event.message: raise IllegalModelStateException("default_lang missing " "from event.message") if self.method in [METHOD_SMS_SURVEY, METHOD_IVR_SURVEY]: check_attr("form_unique_id", obj=event) if not 
isinstance(event.callback_timeout_intervals, list): raise IllegalModelStateException("event." "callback_timeout_intervals expected to be a list") last_day = event.day_num if self.event_interpretation == EVENT_AS_SCHEDULE: if self.schedule_length <= last_day: raise IllegalModelStateException("schedule_length must be " "greater than last event's day_num") else: if self.schedule_length < 0: raise IllegalModelStateException("schedule_length must be" "non-negative") if self.recipient == RECIPIENT_SUBCASE: check_attr("recipient_case_match_property") check_attr("recipient_case_match_type") if self.recipient_case_match_type != MATCH_ANY_VALUE: check_attr("recipient_case_match_value") if (self.custom_content_handler and self.custom_content_handler not in settings.ALLOWED_CUSTOM_CONTENT_HANDLERS): raise IllegalModelStateException("unknown custom_content_handler") self.check_min_tick() def check_min_tick(self, minutes=60): """ For offset-based schedules that are repeated multiple times intraday, makes sure that the events are separated by at least the given number of minutes. """ if (self.event_interpretation == EVENT_AS_OFFSET and self.max_iteration_count != 1 and self.schedule_length == 0): minimum_tick = None for e in self.events: this_tick = timedelta(days=e.day_num, hours=e.fire_time.hour, minutes=e.fire_time.minute) if minimum_tick is None: minimum_tick = this_tick elif this_tick < minimum_tick: minimum_tick = this_tick if minimum_tick < timedelta(minutes=minutes): raise IllegalModelStateException("Minimum tick for a schedule " "repeated multiple times intraday is %s minutes." 
% minutes) def save(self, **params): from corehq.apps.reminders.tasks import process_reminder_rule self.check_state() schedule_changed = params.pop("schedule_changed", False) prev_definition = params.pop("prev_definition", None) send_immediately = params.pop("send_immediately", False) unlock = params.pop("unlock", False) self.last_modified = datetime.utcnow() if unlock: self.locked = False else: self.locked = True super(CaseReminderHandler, self).save(**params) delay = self.start_condition_type == CASE_CRITERIA if not unlock: if delay: process_reminder_rule.delay(self, schedule_changed, prev_definition, send_immediately) else: process_reminder_rule(self, schedule_changed, prev_definition, send_immediately) def process_rule(self, schedule_changed, prev_definition, send_immediately): if not self.deleted(): if self.start_condition_type == CASE_CRITERIA: case_ids = get_case_ids(self.domain) try: client = get_redis_client() client.set("reminder-rule-processing-current-%s" % self._id, 0) client.set("reminder-rule-processing-total-%s" % self._id, len(case_ids)) except: pass process_fast(case_ids, run_rule, item_goal=100, max_threads=5, args=(self, schedule_changed, prev_definition), use_critical_section=False, print_stack_interval=60) elif self.start_condition_type == ON_DATETIME: self.datetime_definition_changed(send_immediately=send_immediately) else: reminder_ids = self.get_reminders(ids_only=True) process_fast(reminder_ids, retire_reminder, item_goal=100, max_threads=5, use_critical_section=False, print_stack_interval=60) @classmethod def get_handlers(cls, domain, reminder_type_filter=None): ids = cls.get_handler_ids(domain, reminder_type_filter=reminder_type_filter) return cls.get_handlers_from_ids(ids) @classmethod def get_handlers_from_ids(cls, ids): return [ CaseReminderHandler.wrap(doc) for doc in iter_docs(cls.get_db(), ids) ] @classmethod def get_handler_ids(cls, domain, reminder_type_filter=None): result = cls.view('reminders/handlers_by_reminder_type', 
startkey=[domain], endkey=[domain, {}], include_docs=False, reduce=False, ) def filter_fcn(reminder_type): if reminder_type_filter is None: return True else: return ((reminder_type or REMINDER_TYPE_DEFAULT) == reminder_type_filter) return [ row['id'] for row in result if filter_fcn(row['key'][1]) ] @classmethod def get_referenced_forms(cls, domain): handlers = cls.get_handlers(domain) referenced_forms = [e.form_unique_id for events in [h.events for h in handlers] for e in events] return filter(None, referenced_forms) @classmethod def get_all_reminders(cls, domain=None, due_before=None, ids_only=False): if due_before: now_json = json_format_datetime(due_before) else: now_json = {} # domain=None will actually get them all, so this works smoothly result = CaseReminder.view('reminders/by_next_fire', startkey=[domain], endkey=[domain, now_json], include_docs=(not ids_only), ).all() if ids_only: return [entry["id"] for entry in result] else: return result @classmethod def fire_reminders(cls, now=None): now = now or cls.get_now() for reminder in cls.get_all_reminders(due_before=now): if reminder.acquire_lock(now) and now >= reminder.next_fire: handler = reminder.handler if handler.fire(reminder): handler.set_next_fire(reminder, now) try: reminder.save() except ResourceConflict: # Submitting a form updates the case, which can update the reminder. # Grab the latest version of the reminder and set the next fire if it's still in use. 
reminder = CaseReminder.get(reminder._id) if not reminder.retired: handler.set_next_fire(reminder, now) reminder.save() try: reminder.release_lock() except ResourceConflict: # This should go away once we move the locking to Redis reminder = CaseReminder.get(reminder._id) reminder.release_lock() def retire(self): self.doc_type += "-Deleted" self.save() def deleted(self): return self.doc_type != 'CaseReminderHandler' class CaseReminder(SafeSaveDocument, LockableMixIn): """ Where the CaseReminderHandler is the rule and schedule for sending out reminders, a CaseReminder is an instance of that rule as it is being applied to a specific CommCareCase. A CaseReminder only applies to a single CommCareCase/CaseReminderHandler interaction and is just a representation of the state of the rule in the lifecycle of the CaseReminderHandler. """ domain = StringProperty() # Domain last_modified = DateTimeProperty() case_id = StringProperty() # Reference to the CommCareCase handler_id = StringProperty() # Reference to the CaseReminderHandler user_id = StringProperty() # Reference to the CouchUser who will receive the SMS messages method = StringProperty(choices=METHOD_CHOICES) # See CaseReminderHandler.method next_fire = DateTimeProperty() # The date and time that the next message should go out last_fired = DateTimeProperty() # The date and time that the last message went out active = BooleanProperty(default=False) # True if active, False if deactivated start_date = DateProperty() # For CaseReminderHandlers with event_interpretation=SCHEDULE, this is the date (in the recipient's time zone) from which all event times are calculated schedule_iteration_num = IntegerProperty() # The current iteration through the cycle of self.handler.events current_event_sequence_num = IntegerProperty() # The current event number (index to self.handler.events) callback_try_count = IntegerProperty() # Keeps track of the number of times a callback has timed out skip_remaining_timeouts = BooleanProperty() # 
An event handling method can set this to True to skip the remaining timeout intervals for the current event start_condition_datetime = DateTimeProperty() # The date and time matching the case property specified by the CaseReminderHandler.start_condition sample_id = StringProperty() xforms_session_ids = ListProperty(StringProperty) error_retry_count = IntegerProperty(default=0) last_scheduled_fire_time = DateTimeProperty() event_initiation_timestamp = DateTimeProperty() # The date and time that the event was started (which is the same throughout all timeouts) error = BooleanProperty(default=False) error_msg = StringProperty() @property def handler(self): return CaseReminderHandler.get(self.handler_id) @property def current_event(self): return self.handler.events[self.current_event_sequence_num] @property def case(self): if self.case_id is not None: return CommCareCase.get(self.case_id) else: return None @property def user(self): if self.handler.recipient == RECIPIENT_USER: return CouchUser.get_by_user_id(self.user_id) else: return None @property def recipient(self): try: return self._recipient_lookup except ResourceNotFound: return None @property def _recipient_lookup(self): handler = self.handler if handler.recipient == RECIPIENT_USER: return self.user elif handler.recipient == RECIPIENT_CASE: return CommConnectCase.get(self.case_id) elif handler.recipient == RECIPIENT_SURVEY_SAMPLE: return CommCareCaseGroup.get(self.sample_id) elif handler.recipient == RECIPIENT_OWNER: return get_wrapped_owner(get_owner_id(self.case)) elif handler.recipient == RECIPIENT_PARENT_CASE: parent_case = None case = self.case if case is not None: parent_case = case.parent if parent_case is not None: parent_case = CommConnectCase.wrap_as_commconnect_case(parent_case) return parent_case elif handler.recipient == RECIPIENT_SUBCASE: indices = self.case.reverse_indices recipients = [] for index in indices: if index.identifier == "parent": subcase = CommConnectCase.get(index.referenced_id) if 
case_matches_criteria(subcase, handler.recipient_case_match_type, handler.recipient_case_match_property, handler.recipient_case_match_value): recipients.append(subcase) return recipients elif handler.recipient == RECIPIENT_USER_GROUP: return Group.get(handler.user_group_id) else: return None @property def retired(self): return self.doc_type.endswith("-Deleted") def save(self, *args, **kwargs): self.last_modified = datetime.utcnow() super(CaseReminder, self).save(*args, **kwargs) def retire(self): self.doc_type += "-Deleted" self.save() class SurveyKeywordAction(DocumentSchema): recipient = StringProperty(choices=KEYWORD_RECIPIENT_CHOICES) recipient_id = StringProperty() action = StringProperty(choices=KEYWORD_ACTION_CHOICES) # Only used for action == METHOD_SMS message_content = StringProperty() # Only used for action in [METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS] form_unique_id = StringProperty() # Only used for action == METHOD_STRUCTURED_SMS use_named_args = BooleanProperty() named_args = DictProperty() # Dictionary of {argument name in the sms (caps) : form question xpath} named_args_separator = StringProperty() # Can be None in which case there is no separator (i.e., a100 b200) class SurveyKeyword(Document): domain = StringProperty() keyword = StringProperty() description = StringProperty() actions = SchemaListProperty(SurveyKeywordAction) delimiter = StringProperty() # Only matters if this is a structured SMS: default is None, in which case the delimiter is any consecutive white space override_open_sessions = BooleanProperty() initiator_doc_type_filter = ListProperty(StringProperty) # List of doc types representing the only types of contacts who should be able to invoke this keyword. Empty list means anyone can invoke. 
# Properties needed for migration and then can be removed form_type = StringProperty(choices=FORM_TYPE_CHOICES, default=FORM_TYPE_ONE_BY_ONE) form_unique_id = StringProperty() use_named_args = BooleanProperty() named_args = DictProperty() named_args_separator = StringProperty() oct13_migration_timestamp = DateTimeProperty() def retire(self): self.doc_type += "-Deleted" self.save() @property def get_id(self): return self._id @classmethod def get_all(cls, domain): return cls.view("reminders/survey_keywords", startkey=[domain], endkey=[domain, {}], include_docs=True, reduce=False, ).all() @classmethod def get_keyword(cls, domain, keyword): return cls.view("reminders/survey_keywords", key = [domain, keyword.upper()], include_docs=True, reduce=False, ).one() @classmethod def get_by_domain(cls, domain, limit=None, skip=None, include_docs=True): extra_kwargs = {} if limit is not None: extra_kwargs['limit'] = limit if skip is not None: extra_kwargs['skip'] = skip return cls.view( 'reminders/survey_keywords', startkey=[domain], endkey=[domain, {}], include_docs=include_docs, reduce=False, **extra_kwargs ).all() class SurveySample(Document): domain = StringProperty() name = StringProperty() contacts = ListProperty(StringProperty) time_zone = StringProperty() def get_time_zone(self): return self.time_zone @classmethod def get_all(cls, domain): return cls.view('reminders/sample_by_domain', startkey=[domain], endkey=[domain, {}], include_docs=True ).all() class SurveyWave(DocumentSchema): date = DateProperty() time = TimeProperty() end_date = DateProperty() form_id = StringProperty() reminder_definitions = DictProperty() # Dictionary of CommCareCaseGroup._id : CaseReminderHandler._id delegation_tasks = DictProperty() # Dictionary of {sample id : {contact id : delegation task id, ...}, ...} def has_started(self, parent_survey_ref): samples = [CommCareCaseGroup.get(sample["sample_id"]) for sample in parent_survey_ref.samples] for sample in samples: if 
CaseReminderHandler.timestamp_to_utc(sample, datetime.combine(self.date, self.time)) <= datetime.utcnow(): return True return False class Survey(Document): domain = StringProperty() name = StringProperty() waves = SchemaListProperty(SurveyWave) followups = ListProperty(DictProperty) samples = ListProperty(DictProperty) send_automatically = BooleanProperty() send_followup = BooleanProperty() @classmethod def get_all(cls, domain): return get_surveys_in_domain(domain) def has_started(self): for wave in self.waves: if wave.has_started(self): return True return False def update_delegation_tasks(self, submitting_user_id): utcnow = datetime.utcnow() # Get info about each CATI sample and the instance of that sample used for this survey cati_sample_data = {} for sample_json in self.samples: if sample_json["method"] == "CATI": sample_id = sample_json["sample_id"] cati_sample_data[sample_id] = { "sample_object": CommCareCaseGroup.get(sample_id), "incentive" : sample_json["incentive"], "cati_operator" : sample_json["cati_operator"], } for wave in self.waves: if wave.has_started(self): continue # Close any tasks for samples that are no longer used, and for contacts that are no longer in the samples for sample_id, tasks in wave.delegation_tasks.items(): if sample_id not in cati_sample_data: for case_id, delegation_case_id in tasks.items(): close_task(self.domain, delegation_case_id, submitting_user_id) del wave.delegation_tasks[sample_id] else: for case_id in list(set(tasks.keys()).difference( cati_sample_data[sample_id]["sample_object"].cases)): close_task(self.domain, tasks[case_id], submitting_user_id) del wave.delegation_tasks[sample_id][case_id] # Update / Create tasks for existing / new contacts for sample_id, sample_data in cati_sample_data.items(): task_activation_datetime = CaseReminderHandler.timestamp_to_utc(sample_data["sample_object"], datetime.combine(wave.date, wave.time)) task_deactivation_datetime = 
CaseReminderHandler.timestamp_to_utc(sample_data["sample_object"], datetime.combine(wave.end_date, wave.time)) if sample_id not in wave.delegation_tasks: wave.delegation_tasks[sample_id] = {} for case_id in sample_data["sample_object"].cases: wave.delegation_tasks[sample_id][case_id] = create_task( CommCareCase.get(case_id), submitting_user_id, sample_data["cati_operator"], wave.form_id, task_activation_datetime, task_deactivation_datetime, sample_data["incentive"] ) else: for case_id in sample_data["sample_object"].cases: delegation_case_id = wave.delegation_tasks[sample_id].get(case_id, None) if delegation_case_id is None: wave.delegation_tasks[sample_id][case_id] = create_task( CommCareCase.get(case_id), submitting_user_id, sample_data["cati_operator"], wave.form_id, task_activation_datetime, task_deactivation_datetime, sample_data["incentive"] ) else: delegation_case = CommCareCase.get(delegation_case_id) if (delegation_case.owner_id != sample_data["cati_operator"] or delegation_case.get_case_property("start_date") != task_activation_datetime or delegation_case.get_case_property("end_date") != task_deactivation_datetime or delegation_case.get_case_property("form_id") != wave.form_id): update_task( self.domain, delegation_case_id, submitting_user_id, sample_data["cati_operator"], wave.form_id, task_activation_datetime, task_deactivation_datetime, sample_data["incentive"] ) from .signals import * ```
[ { "content": "Repeat the code exactly as the original, including blank lines:\n```python\n# Copyright 2016 Casey Jaymes\n\n# This file is part of PySCAP.\n#\n# PySCAP is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Softw...
[ { "content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# Copyright 2016 Casey Jaymes\n\n# This file is part of PySCAP.\n#\n# PySCAP is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n...
```python # Copyright 2016 Casey Jaymes # This file is part of PySCAP. # # PySCAP is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PySCAP is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PySCAP. If not, see <http://www.gnu.org/licenses/>. import logging from scap.model.oval_5.defs.windows.RegistryBehaviors import RegistryBehaviors logger = logging.getLogger(__name__) class RegkeyEffectiveRights53Behaviors(RegistryBehaviors): MODEL_MAP = { 'attributes': { 'include_group': {'type': 'BooleanType', 'default': True}, 'resolve_group': {'type': 'BooleanType', 'default': False}, } } ```
[ { "content": "Repeat the full code snippet:\n```python\n\"\"\"\n2.2.2: If `onFulfilled` is a function\nhttps://github.com/promises-aplus/promises-tests/blob/2.1.1/lib/tests/2.2.2.js\n\"\"\"\nimport threading\n\nfrom sleekxmpp.test import SleekTest\n\n\nclass Promise_2_2_2_1_TestCase(SleekTest):\n \"\"\"\n ...
[ { "content": "Repeat the full code snippet:\n<|memory_start|>```python\n\"\"\"\n2.2.2: If `onFulfilled` is a function\nhttps://github.com/promises-aplus/promises-tests/blob/2.1.1/lib/tests/2.2.2.js\n\"\"\"\nimport threading\n\nfrom sleekxmpp.test import SleekTest\n\n\nclass Promise_2_2_2_1_TestCase(SleekTest):\...
```python """ 2.2.2: If `onFulfilled` is a function https://github.com/promises-aplus/promises-tests/blob/2.1.1/lib/tests/2.2.2.js """ import threading from sleekxmpp.test import SleekTest class Promise_2_2_2_1_TestCase(SleekTest): """ 2.2.2.1: it must be called after `promise` is fulfilled, with `promise`'s fulfillment value as its first argument. """ dummy = {'dummy': 'dummy'} sentinel = {'sentinel': 'sentinel'} def setUp(self): from sleekpromises import register_sleek_promises register_sleek_promises() self.session = {} self.stream_start(plugins=['sleekpromises_scheduler', ]) self.scheduler = self.xmpp['sleekpromises_scheduler'] def tearDown(self): self.stream_close() def test_already_fulfilled(self): self.session['called'] = False event = threading.Event() def fulfilled_called(arg): self.session['called'] = True self.assertIs(self.sentinel, arg) event.set() def rejected_called(arg): self.assertFalse(self.session['called']) # Create a promise and resolve it promise = self.scheduler.promise() promise.resolved(self.sentinel) promise.then(fulfilled_called, rejected_called) event.wait(1.0) self.assertTrue(self.session['called']) def test_immediately_fulfilled(self): self.session['called'] = False event = threading.Event() def fulfilled_called(arg): self.session['called'] = True self.assertIs(self.sentinel, arg) event.set() def rejected_called(arg): self.assertFalse(self.session['called']) # Create a promise and resolve it promise = self.scheduler.promise() promise.then(fulfilled_called, rejected_called) promise.resolved(self.sentinel) event.wait(1.0) self.assertTrue(self.session['called']) def test_eventually_fulfilled(self): self.session['called'] = False event = threading.Event() def fulfilled_called(arg): self.session['called'] = True self.assertIs(self.sentinel, arg) event.set() def rejected_called(arg): self.assertFalse(self.session['called']) def deferred_method(): self.session['promise'].resolved(self.sentinel) # Create a promise and store it off promise = 
self.scheduler.promise() self.session['promise'] = promise promise.then(fulfilled_called, rejected_called) # Schedule it on a different thread. self.scheduler.schedule_task(deferred_method, delay=0.1) event.wait(1.0) self.assertTrue(self.session['called']) class Promise_2_2_2_2_TestCase(SleekTest): """ 2.2.2.2: it must not be called before `promise` is fulfilled """ dummy = {'dummy': 'dummy'} sentinel = {'sentinel': 'sentinel'} def setUp(self): from sleekpromises import register_sleek_promises register_sleek_promises() self.session = {} self.stream_start(plugins=['sleekpromises_scheduler', ]) self.scheduler = self.xmpp['sleekpromises_scheduler'] def tearDown(self): self.stream_close() def test_fulfilled_after_a_delay(self): self.session['afterResolve'] = False event = threading.Event() def fulfilled_called(arg): self.assertTrue(self.session['afterResolve']) event.set() def deferred(): promise.resolved(self.dummy) self.session['afterResolve'] = True # Create a promise and resolve it promise = self.scheduler.promise() self.session['promise'] = promise promise.then(fulfilled_called) self.scheduler.schedule_task(deferred, delay=0.05) event_wait = event.wait(1.0) self.assertTrue(self.session['afterResolve']) self.assertTrue(event_wait) def test_never_fulfilled(self): self.session['called'] = False event = threading.Event() def fulfilled_called(arg): self.session['called'] = True event.set() promise = self.scheduler.promise() promise.then(fulfilled_called) event_wait = event.wait(0.150) self.assertFalse(self.session['called']) self.assertFalse(event_wait) class Promise_2_2_2_3_TestCase(SleekTest): """ 2.2.2.3: it must not be called more than once. 
""" dummy = {'dummy': 'dummy'} sentinel = {'sentinel': 'sentinel'} def setUp(self): from sleekpromises import register_sleek_promises register_sleek_promises() self.session = {} self.stream_start(plugins=['sleekpromises_scheduler', ]) self.scheduler = self.xmpp['sleekpromises_scheduler'] def tearDown(self): self.stream_close() def test_already_fulfilled(self): self.session['times_called'] = 0 event = threading.Event() def fulfilled(arg): self.session['times_called'] += 1 event.set() promise = self.scheduler.promise() promise.resolved(self.dummy) promise.then(fulfilled) event_set = event.wait(1.0) self.assertTrue(event_set) self.assertEqual(1, self.session['times_called']) def test_trying_to_fulfill_a_pending_promise_more_than_once_immediately(self): self.session['times_called'] = 0 event = threading.Event() def fulfilled(arg): self.session['times_called'] += 1 event.set() promise = self.scheduler.promise() promise.then(fulfilled) promise.resolved(self.dummy) promise.resolved(self.dummy) event_set = event.wait(1.0) self.assertTrue(event_set) self.assertEqual(1, self.session['times_called']) def test_trying_to_fulfill_a_pending_promise_more_than_once_delayed(self): self.session['times_called'] = 0 event = threading.Event() def fulfilled(arg): self.session['times_called'] += 1 event.set() def deferred(): promise = self.session['promise'] promise.resolved(self.dummy) promise.resolved(self.dummy) promise = self.scheduler.promise() self.session['promise'] = promise promise.then(fulfilled) self.scheduler.schedule_task(deferred, delay=0.50) event_set = event.wait(1.0) self.assertTrue(event_set) self.assertEqual(1, self.session['times_called']) def test_trying_to_fulfill_a_pending_promise_more_than_once_immediately_then_delayed(self): self.session['times_called'] = 0 event = threading.Event() def fulfilled(arg): self.session['times_called'] += 1 event.set() def deferred(): promise = self.session['promise'] promise.resolved(self.dummy) promise = self.scheduler.promise() 
self.session['promise'] = promise promise.then(fulfilled) promise.resolved(self.dummy) self.scheduler.schedule_task(deferred, delay=0.50) event_set = event.wait(1.0) self.assertTrue(event_set) self.assertEqual(1, self.session['times_called']) def test_when_multiple_then_calls_are_made_spaced_apart_in_time(self): self.session['times_called'] = [0, 0, 0] event = threading.Event() def fulfilled_0(arg): self.session['times_called'][0] += 1 def fulfilled_1(arg): self.session['times_called'][1] += 1 def fulfilled_2(arg): self.session['times_called'][2] += 1 event.set() def resolve_function(): promise = self.session['promise'] promise.resolved(self.dummy) promise = self.scheduler.promise() self.session['promise'] = promise promise.then(fulfilled_0) self.scheduler.schedule_task(lambda: promise.then(fulfilled_1), delay=0.05) self.scheduler.schedule_task(lambda: promise.then(fulfilled_2), delay=0.10) self.scheduler.schedule_task(resolve_function, delay=0.50) event_set = event.wait(1.0) self.assertTrue(event_set) self.assertEqual([1, 1, 1], self.session['times_called']) def test_when_then_is_interleaved_with_fulfillment(self): self.session['times_called'] = [0, 0] event = threading.Event() def fulfilled_0(arg): self.session['times_called'][0] += 1 def fulfilled_1(arg): self.session['times_called'][1] += 1 event.set() promise = self.scheduler.promise() self.session['promise'] = promise promise.then(fulfilled_0) promise.resolved(self.dummy) promise.then(fulfilled_1) event_set = event.wait(1.0) self.assertTrue(event_set) self.assertEqual([1, 1], self.session['times_called']) ```
[ { "content": "Return the code unaltered:\n```python\n#!/usr/bin/env python\n\nimport sys\nimport json\nfrom types import SimpleNamespace\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom webbrowser import open_new_tab\n\nimport tornado.ioloop\nimport tornado.web\n\nfrom .console import s...
[ { "content": "Return the code unaltered:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport sys\nimport json\nfrom types import SimpleNamespace\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom webbrowser import open_new_tab\n\nimport tornado.ioloop\nimport tornado.web\n\nfrom ....
```python #!/usr/bin/env python import sys import json from types import SimpleNamespace from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter from webbrowser import open_new_tab import tornado.ioloop import tornado.web from .console import start_message, append_message, print_message from .infos import DbInfo, PkgInfo, GroupInfo, VDepInfo # Tornado entry class MainHandler(tornado.web.RequestHandler): def parse_args(self, **kargs): result = {} for key in kargs: defvalue = str(kargs[key]) if type(kargs[key]) is int: result[key] = int(self.get_argument(key, defvalue)) elif type(kargs[key]) is bool: result[key] = self.get_argument(key, defvalue) != "False" else: result[key] = self.get_argument(key, defvalue) print_message("get arg %r: %r" % (key, result[key])) return result def get(self): print_message("\n" + str(self.request)) args = SimpleNamespace(**self.parse_args( maxlevel=1000, maxreqs=1000, maxdeps=1000, drawsize="isize", usemagic=False, straightline=False, enablephysics=False, aligntop=False, disableallphysics=False, debugperformance=False, byrepos=False, showallvdeps=False)) dbinfo = DbInfo() start_message("Loading local database ...") dbinfo.find_all(args.showallvdeps) append_message("done") start_message("Finding all dependency circles ... ") dbinfo.find_circles() append_message("done") dbinfo.topology_sort(args.usemagic, args.aligntop, args.byrepos) dbinfo.calcSizes() start_message("Rendering ... 
") nodes = [] links = [] nodes.append({"id": 0, "label": "level 1 group", "level": 0, "shape": "triangleDown", "isize": 0, "csize": 0, "cssize": 0, "deps": "", "reqs": "", "optdeps": "", "desc": "", "version": "", "group": "group", "groups": "", "provides": "", }) ids = 1 for pkg in sorted(dbinfo.all_pkgs.values(), key=lambda x: x.level): append_message("%s" % pkg.name) pkg.id = ids ids += 1 if pkg.level < args.maxlevel: group = "normal" if pkg.level == 0: group = "standalone" elif type(pkg) is GroupInfo: group = "group" elif type(pkg) is VDepInfo: group = "vdep" # if not args.showallvdeps and len(pkg.requiredby) == 0: # continue elif pkg.explicit: group = "explicit" nodes.append({"id": pkg.id, "label": pkg.name, "level": pkg.level, "group": group, "isize": pkg.isize, "csize": pkg.csize, "cssize": pkg.cssize, "deps": ", ".join(pkg.deps), "reqs": ", ".join(pkg.requiredby), "optdeps": ", ".join(pkg.optdeps), "groups": ", ".join(pkg.groups), "provides": ", ".join(pkg.provides), "desc": pkg.desc, "version": pkg.version, "repo": pkg.repo, }) ids = 0 for pkg in sorted(dbinfo.all_pkgs.values(), key=lambda x: x.level): if pkg.level < args.maxlevel: if len(pkg.deps) == 0 and len(pkg.requiredby) == 0: links.append({"id": ids, "from": pkg.id, "to": 0}) ids += 1 if len(pkg.deps) < args.maxdeps: for dep in pkg.deps: if dep not in pkg.circledeps: if len(dbinfo.get(dep).requiredby) < args.maxreqs: links.append({"id": ids, "from": pkg.id, "to": dbinfo.get(dep).id}) ids += 1 for dep in pkg.circledeps: if (pkg.id != dbinfo.get(dep).id): links.append({"id": ids, "to": pkg.id, "from": dbinfo.get(dep).id, "color": "rgb(244,67,54,0.8)"}) ids += 1 for dep in pkg.optdeps: if dep in dbinfo.all_pkgs: links.append({"id": ids, "from": pkg.id, "to": dbinfo.get(dep).id, "dashes": True, "color": "rgb(255,235,59)"}) ids += 1 print_message("Writing HTML") self.render("templates/index.template.html", nodes=json.dumps(nodes), links=json.dumps(links), options=args, 
optionsjson=json.dumps(args.__dict__)) def make_app(): import os return tornado.web.Application([ (r"/", MainHandler), ], debug=True, static_path=os.path.join(os.path.dirname(__file__), "static")) def main(): argp = ArgumentParser(description='start PacVis server', formatter_class=ArgumentDefaultsHelpFormatter) argp.add_argument('-p', '--port', type=int, default=8888, help='listen at given port') argp.add_argument('-s', '--host', type=str, default='localhost', help='listen at given hostname') argp.add_argument('-b', '--browser', action='store_true', help='start a browser') args = argp.parse_args() app = make_app() app.listen(args.port, address=args.host) print_message(f"Start PacVis at http://{args.host}:{args.port}/") if args.browser: url = f'http://{args.host}:{args.port}/' print_message(f'open in browser: {url}') open_new_tab(url) else: print_message('use --browser to open a browser automatically.') try: tornado.ioloop.IOLoop.current().start() except KeyboardInterrupt: print_message("Received interrupt from keyboard, shutting down ...") sys.exit(0) if __name__ == "__main__": main() ```
[ { "content": "Repeat the full code snippet:\n```python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\ntitlecase.py v0.2\nOriginal Perl version by: John Gruber http://daringfireball.net/ 10 May 2008\nPython version by Stuart Colville http://muffinresearch.co.uk\nLicense: http://www.opensource.org/licenses/mit-license.php\n...
[ { "content": "Repeat the full code snippet:\n<|memory_start|>```python\n# -*- coding: UTF-8 -*-\n\n\"\"\"\ntitlecase.py v0.2\nOriginal Perl version by: John Gruber http://daringfireball.net/ 10 May 2008\nPython version by Stuart Colville http://muffinresearch.co.uk\nLicense: http://www.opensource.org/licenses/m...
```python # -*- coding: UTF-8 -*- """ titlecase.py v0.2 Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008 Python version by Stuart Colville http://muffinresearch.co.uk License: http://www.opensource.org/licenses/mit-license.php """ import unittest import sys import re SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?' PUNCT = "[!\"#$%&'‘()*+,-./:;?@[\\\\\\]_`{|}~]" SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I) INLINE_PERIOD = re.compile(r'[a-zA-Z][.][a-zA-Z]') UC_ELSEWHERE = re.compile(r'%s*?[a-zA-Z]+[A-Z]+?' % PUNCT) CAPFIRST = re.compile(r"^%s*?([A-Za-z])" % PUNCT) SMALL_FIRST = re.compile(r'^(%s*)(%s)\b' % (PUNCT, SMALL), re.I) SMALL_LAST = re.compile(r'\b(%s)%s?$' % (SMALL, PUNCT), re.I) SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL) def titlecase(text): """ Titlecases input text This filter changes all words to Title Caps, and attempts to be clever about *un*capitalizing SMALL words like a/an/the in the input. The list of "SMALL words" which are not capped comes from the New York Times Manual of Style, plus 'vs' and 'v'. 
""" words = re.split('\s', text) line = [] for word in words: if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word): line.append(word) continue if SMALL_WORDS.match(word): line.append(word.lower()) continue line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word)) line = " ".join(line) line = SMALL_FIRST.sub(lambda m: '%s%s' % ( m.group(1), m.group(2).capitalize() ), line) line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line) line = SUBPHRASE.sub(lambda m: '%s%s' % ( m.group(1), m.group(2).capitalize() ), line) return line class TitlecaseTests(unittest.TestCase): """Tests to ensure titlecase follows all of the rules""" def test_q_and_a(self): u"""Testing: Q&A With Steve Jobs: 'That’s What Happens In Technology' """ text = titlecase( u"Q&A with steve jobs: 'that’s what happens in technology'" ) result = u"Q&A With Steve Jobs: 'That’s What Happens in Technology'" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_at_and_t(self): u"""Testing: What Is AT&T's Problem?""" text = titlecase(u"What is AT&T’s problem?") result = u"What Is AT&T’s Problem?" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_apple_deal(self): """Testing: Apple Deal With AT&T Falls Through""" text = titlecase("Apple deal with AT&T falls through") result = "Apple Deal With AT&T Falls Through" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_this_v_that(self): """Testing: this v that""" text = titlecase("this v that") result = "This v That" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_this_v_that2(self): """Testing: this v. that""" text = titlecase("this v. that") result = "This v. 
That" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_this_vs_that(self): """Testing: this vs that""" text = titlecase("this vs that") result = "This vs That" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_this_vs_that2(self): """Testing: this vs. that""" text = titlecase("this vs. that") result = "This vs. That" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_apple_sec(self): u"""Testing: The SEC’s Apple Probe: What You Need to Know""" text = titlecase("The SEC’s Apple Probe: What You Need to Know") result = u"The SEC’s Apple Probe: What You Need to Know" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_small_word_quoted(self): """Testing: 'by the Way, Small word at the start but within quotes.'""" text = titlecase( "'by the Way, small word at the start but within quotes.'" ) result = "'By the Way, Small Word at the Start but Within Quotes.'" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_small_word_end(self): """Testing: Small word at end is nothing to be afraid of""" text = titlecase("Small word at end is nothing to be afraid of") result = "Small Word at End Is Nothing to Be Afraid Of" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_sub_phrase_small_word(self): """Testing: Starting Sub-Phrase With a Small Word: a Trick, Perhaps?""" text = titlecase( "Starting Sub-Phrase With a Small Word: a Trick, Perhaps?" ) result = "Starting Sub-Phrase With a Small Word: A Trick, Perhaps?" 
self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_small_word_quotes(self): """Testing: Sub-Phrase With a Small Word in Quotes: 'a Trick...""" text = titlecase( "Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'" ) result = "Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_small_word_double_quotes(self): """Testing: Sub-Phrase With a Small Word in Quotes: \"a Trick...""" text = titlecase( 'Sub-Phrase With a Small Word in Quotes: "a Trick, Perhaps?"' ) result = 'Sub-Phrase With a Small Word in Quotes: "A Trick, Perhaps?"' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_nothing_to_be_afraid_of(self): """Testing: \"Nothing to Be Afraid of?\"""" text = titlecase('"Nothing to Be Afraid of?"') result = '"Nothing to Be Afraid Of?"' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_nothing_to_be_afraid_of2(self): """Testing: \"Nothing to Be Afraid Of?\"""" text = titlecase('"Nothing to be Afraid Of?"') result = '"Nothing to Be Afraid Of?"' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_a_thing(self): """Testing: a thing""" text = titlecase('a thing') result = 'A Thing' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_vapourware(self): """Testing: 2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'""" text = titlecase( "2lmc Spool: 'gruber on OmniFocus and vapo(u)rware'" ) result = "2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_domains(self): """Testing: this is just an example.com""" text = titlecase('this is just an example.com') result = 'This Is Just an example.com' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_domains2(self): """Testing: this is something listed on an del.icio.us""" text = titlecase('this is 
something listed on del.icio.us') result = 'This Is Something Listed on del.icio.us' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_itunes(self): """Testing: iTunes should be unmolested""" text = titlecase('iTunes should be unmolested') result = 'iTunes Should Be Unmolested' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_thoughts_on_music(self): u"""Testing: Reading Between the Lines of Steve Jobs’s...""" text = titlecase( u'Reading between the lines of steve jobs’s ‘thoughts on music’' ) result = u'Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_repair_perms(self): u"""Testing: Seriously, ‘Repair Permissions’ Is Voodoo""" text = titlecase(u'seriously, ‘repair permissions’ is voodoo') result = u'Seriously, ‘Repair Permissions’ Is Voodoo' self.assertEqual(text, result, "%s should be: %s" % (text, result, )) def test_generalissimo(self): """Testing: Generalissimo Francisco Franco...""" text = titlecase( 'generalissimo francisco franco: still dead; kieren McCarthy: '\ 'still a jackass' ) result = u"""Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass.""" self.assertEqual(text, result, "%s should be: %s" % (text, result, )) if __name__ == '__main__': if not sys.stdin.isatty(): for line in sys.stdin: print(titlecase(line)) else: suite = unittest.TestLoader().loadTestsFromTestCase(TitlecaseTests) unittest.TextTestRunner(verbosity=2).run(suite) ```
[ { "content": "Replicate the source code:\n```python\n#!/usr/bin/env python\n\nfrom datetime import datetime\nfrom itertools import chain\nimport re\n\nimport numpy as np\nimport click\n\nfrom labman.db.process import (\n SamplePlatingProcess, GDNAExtractionProcess, GDNAPlateCompressionProcess,\n LibraryPr...
[ { "content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nfrom datetime import datetime\nfrom itertools import chain\nimport re\n\nimport numpy as np\nimport click\n\nfrom labman.db.process import (\n SamplePlatingProcess, GDNAExtractionProcess, GDNAPlateCompressionProcess...
```python #!/usr/bin/env python from datetime import datetime from itertools import chain import re import numpy as np import click from labman.db.process import ( SamplePlatingProcess, GDNAExtractionProcess, GDNAPlateCompressionProcess, LibraryPrep16SProcess, NormalizationProcess, QuantificationProcess, LibraryPrepShotgunProcess, PoolingProcess, SequencingProcess) from labman.db.user import User from labman.db.plate import PlateConfiguration, Plate from labman.db.equipment import Equipment from labman.db.composition import ReagentComposition from labman.db.sql_connection import TRN def get_samples(): with TRN: TRN.add("SELECT sample_id FROM qiita.study_sample") return TRN.execute_fetchflatten() def create_sample_plate_process(user, samples): plate_config = PlateConfiguration(1) num_rows = plate_config.num_rows num_cols = plate_config.num_columns sp_process = SamplePlatingProcess.create( user, plate_config, 'Test plate %s' % datetime.now()) # Plate the samples for idx, sample in enumerate(samples): i = int(idx / num_cols) + 1 j = (idx % num_cols) + 1 # Make sure that the user didn't pass more samples than wells if i > num_rows: break sp_process.update_well(i, j, sample) sample_plate = sp_process.plate return sp_process, sample_plate def create_gdna_extraction_process(user, plate): kingfisher = Equipment(11) epmotion = Equipment(6) epmotion_tool = Equipment(15) extraction_kit = ReagentComposition(1) ext_process = GDNAExtractionProcess.create( user, plate, kingfisher, epmotion, epmotion_tool, extraction_kit, 100, 'GDNA test plate %s' % datetime.now()) gdna_plate = ext_process.plates[0] return ext_process, gdna_plate def create_amplicon_prep(user, plate): primer_plate = Plate(11) epmotion = Equipment(6) master_mix = ReagentComposition(2) water_lot = ReagentComposition(3) epmotion_tool_tm300 = Equipment(16) epmotion_tool_tm50 = Equipment(17) amplicon_process = LibraryPrep16SProcess.create( user, plate, primer_plate, 'Amplicon test plate %s' % datetime.now(), epmotion, 
epmotion_tool_tm300, epmotion_tool_tm50, master_mix, water_lot, 75,) amplicon_plate = amplicon_process.plates[0] return amplicon_process, amplicon_plate def create_compression_process(user, gdna_plates): comp_process = GDNAPlateCompressionProcess.create( user, gdna_plates, 'Compressed test plate %s' % datetime.now(), Equipment(6)) compressed_plate = comp_process.plates[0] return comp_process, compressed_plate def create_quantification_process(user, plate): plate_config = plate.plate_configuration concentrations = np.around( np.random.rand(plate_config.num_rows, plate_config.num_columns), 6) quant_process = QuantificationProcess.create(user, plate, concentrations) return quant_process def create_pool_quantification_process(user, pools): concentrations = np.around(np.random.rand(len(pools)), 6) concentrations = [{'composition': p, 'concentration': c} for p, c in zip(pools, concentrations)] return QuantificationProcess.create_manual(user, concentrations) def create_normalization_process(user, quant_process): water = ReagentComposition(3) norm_process = NormalizationProcess.create( user, quant_process, water, 'Normalized test plate %s' % datetime.now()) norm_plate = norm_process.plates[0] return norm_process, norm_plate def create_shotgun_process(user, norm_plate): kappa = ReagentComposition(4) stub = ReagentComposition(5) shotgun_process = LibraryPrepShotgunProcess.create( user, norm_plate, 'Test Shotgun Library %s' % datetime.now(), kappa, stub, 4000, Plate(19), Plate(20)) shotgun_plate = shotgun_process.plates[0] return shotgun_process, shotgun_plate def create_plate_pool_process(user, quant_process, plate, func_data): input_compositions = [] echo = Equipment(8) for well in chain.from_iterable(plate.layout): if well is not None: input_compositions.append({ 'composition': well.composition, 'input_volume': 1, 'percentage_of_output': 1/9.0}) pool_process = PoolingProcess.create( user, quant_process, 'New test pool name %s' % datetime.now(), 4, input_compositions, 
func_data, robot=echo) return pool_process def create_pools_pool_process(user, quant_process, pools): input_compositions = [ {'composition': p, 'input_volume': 1, 'percentage_of_output': 1/9.0} for p in pools] pool_process = PoolingProcess.create( user, quant_process, 'New pool name %s' % datetime.now(), 5, input_compositions, {"function": "amplicon_pool", "parameters": {}}) return pool_process def create_sequencing_process(user, pools): seq_process = SequencingProcess.create( user, pools, 'New sequencing run %s' % datetime.now(), 'Run experiment %s' % datetime.now(), Equipment(18), 151, 151, User('admin@foo.bar'), contacts=[User('test@foo.bar'), User('demo@microbio.me')]) return seq_process def amplicon_workflow(user, samples): # Sample Plating sp_process, sample_plate = create_sample_plate_process(user, samples[:96]) # gDNA extraction ext_process, gdna_plate = create_gdna_extraction_process( user, sample_plate) # Amplicon library prep amplicon_process, amplicon_plate = create_amplicon_prep(user, gdna_plate) # Library plate quantification amplicon_quant_process = create_quantification_process( user, amplicon_plate) # Plate pooling process plate_pool_process = create_plate_pool_process( user, amplicon_quant_process, amplicon_plate, {'function': 'amplicon', 'parameters': {"dna_amount": 240, "min_val": 1, "max_val": 15, "blank_volume": 2, "robot": 6, "destination": 1}}) # Quantify pools pool_quant_process = create_pool_quantification_process( user, [plate_pool_process.pool]) # Create sequencing pool process seq_pool_process = create_pools_pool_process( user, pool_quant_process, [plate_pool_process.pool]) # Sequencing process seq_process = create_sequencing_process(user, [seq_pool_process.pool]) return seq_process def shotgun_workflow(user, samples): # Sample Plating sp_process, sample_plate = create_sample_plate_process(user, samples[:96]) # gDNA extraction ext_process, gdna_plate = create_gdna_extraction_process( user, sample_plate) # gDNA compression comp_process, 
compressed_plate = create_compression_process( user, [gdna_plate]) # gDNA compressed quantification gdna_comp_quant_process = create_quantification_process( user, compressed_plate) # Normalization process norm_process, norm_plate = create_normalization_process( user, gdna_comp_quant_process) # Library prep shotgun shotgun_process, shotgun_plate = create_shotgun_process(user, norm_plate) # Quantify library plate shotgun_quant_process = create_quantification_process(user, shotgun_plate) # Pooling process pool_process = create_plate_pool_process( user, shotgun_quant_process, shotgun_plate, {'function': 'equal', 'parameters': {'total_vol': 60, 'size': 500}}) # Sequencing process seq_process = create_sequencing_process(user, [pool_process.pool]) return seq_process @click.command() def integration_tests(): samples = get_samples() user = User('test@foo.bar') amplicon_seq_process = amplicon_workflow(user, samples) shotgun_seq_process = shotgun_workflow(user, samples) obs = amplicon_seq_process.generate_sample_sheet() res = re.match(EXP_AMPLICON_SAMPLE_SHEET, obs) if res is None: raise ValueError( 'Amplicon sample sheet does not match expected regex:\n%s' % obs) obs = shotgun_seq_process.generate_sample_sheet() res = re.match(EXP_SHOTGUN_SAMPLE_SHEET, obs) if res is None: raise ValueError( 'Shotgun sample sheet does not match expected regex:\n%s' % obs) EXP_AMPLICON_SAMPLE_SHEET = r"""# PI,Admin,admin@foo.bar # Contact,Demo,Dude # Contact emails,demo@microbio.me,test@foo.bar \[Header\] IEMFileVersion,4 Investigator Name,Admin Experiment Name,Run experiment \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6} Date,\d{4}-\d{2}-\d{2} Workflow,GenerateFASTQ Application,FASTQ Only Assay,Amplicon Description, Chemistry,Default \[Reads\] 151 151 \[Settings\] ReverseComplement,0 \[Data\] Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,Sample_Project,Description,, New_sequencing_run_\d{4}-\d{2}-\d{2}_\d{2}_\d{2}_\d{2}_\d{6},,,,,NNNNNNNNNNNN,,,,,""" # noqa 
EXP_SHOTGUN_SAMPLE_SHEET = r"""# PI,Admin,admin@foo.bar # Contact,Demo,Dude # Contact emails,demo@microbio.me,test@foo.bar \[Header\] IEMFileVersion,4 Investigator Name,Admin Experiment Name,Run experiment \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6} Date,\d{4}-\d{2}-\d{2} Workflow,GenerateFASTQ Application,FASTQ Only Assay,Metagenomics Description, Chemistry,Default \[Reads\] 151 151 \[Settings\] ReverseComplement,0 \[Data\] Lane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description 1,1_SKB1_640202,1_SKB1_640202,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A23,iTru7_101_09,TGTACACC,iTru5_08_A,CATCTGCT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB1.640202 1,1_SKB2_640194,1_SKB2_640194,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A17,iTru7_101_06,AACAACCG,iTru5_05_A,GGTACGAA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB2.640194 1,1_SKB3_640195,1_SKB3_640195,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E1,iTru7_102_10,GTTAAGGC,iTru5_09_B,ACGGACTT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB3.640195 1,1_SKB4_640189,1_SKB4_640189,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C17,iTru7_102_06,TGTGCGTT,iTru5_05_B,AAGCATCG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB4.640189 1,1_SKB5_640181,1_SKB5_640181,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A13,iTru7_101_04,GATCCATG,iTru5_03_A,AACACCAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB5.640181 1,1_SKB6_640176,1_SKB6_640176,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E3,iTru7_102_11,AAGCCACA,iTru5_10_B,CATGTGTG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB6.640176 1,1_SKB7_640196,1_SKB7_640196,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A5,iTru7_115_11,CTTAGGAC,iTru5_123_H,CTCTTGTC,New sequencing run 
\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB7.640196 1,1_SKB8_640193,1_SKB8_640193,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A1,iTru7_115_09,AGCACTTC,iTru5_121_H,GATGCTAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB8.640193 1,1_SKB9_640200,1_SKB9_640200,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C9,iTru7_102_02,CTTACCTG,iTru5_01_B,AGTGGCAA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKB9.640200 1,1_SKD1_640179,1_SKD1_640179,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C1,iTru7_101_10,GTATGCTG,iTru5_09_A,CTCTCAGA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD1.640179 1,1_SKD2_640178,1_SKD2_640178,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A19,iTru7_101_07,ACTCGTTG,iTru5_06_A,CGATCGAT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD2.640178 1,1_SKD3_640198,1_SKD3_640198,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C3,iTru7_101_11,TGATGTCC,iTru5_10_A,TCGTCTGA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD3.640198 1,1_SKD4_640185,1_SKD4_640185,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C23,iTru7_102_09,ACAGCTCA,iTru5_08_B,ACCTCTTC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD4.640185 1,1_SKD5_640186,1_SKD5_640186,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C11,iTru7_102_03,CGTTGCAA,iTru5_02_B,GTGGTATG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD5.640186 1,1_SKD6_640190,1_SKD6_640190,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A15,iTru7_101_05,GCCTATCA,iTru5_04_A,CGTATCTC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD6.640190 1,1_SKD7_640191,1_SKD7_640191,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C19,iTru7_102_07,TAGTTGCG,iTru5_06_B,TACTCCAG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD7.640191 
1,1_SKD8_640184,1_SKD8_640184,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A3,iTru7_115_10,GCATACAG,iTru5_122_H,GAACGGTT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD8.640184 1,1_SKD9_640182,1_SKD9_640182,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C15,iTru7_102_05,TCACGTTC,iTru5_04_B,CGTCAAGA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKD9.640182 1,1_SKM1_640183,1_SKM1_640183,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E5,iTru7_102_12,ACACGGTT,iTru5_11_B,TGCCTCAA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM1.640183 1,1_SKM2_640199,1_SKM2_640199,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C7,iTru7_102_01,ATAAGGCG,iTru5_12_A,CATTCGTC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM2.640199 1,1_SKM3_640197,1_SKM3_640197,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C13,iTru7_102_04,GATTCAGC,iTru5_03_B,TGAGCTGT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM3.640197 1,1_SKM4_640180,1_SKM4_640180,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A9,iTru7_101_02,CTGTGTTG,iTru5_01_A,ACCGACAA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM4.640180 1,1_SKM5_640177,1_SKM5_640177,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A11,iTru7_101_03,TGAGGTGT,iTru5_02_A,CTTCGCAA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM5.640177 1,1_SKM6_640187,1_SKM6_640187,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},C21,iTru7_102_08,AAGAGCCA,iTru5_07_B,GATACCTG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM6.640187 1,1_SKM7_640188,1_SKM7_640188,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A21,iTru7_101_08,CCTATGGT,iTru5_07_A,AAGACACC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM7.640188 1,1_SKM8_640201,1_SKM8_640201,Test Shotgun Library \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},C5,iTru7_101_12,GTCCTTCT,iTru5_11_A,CAATAGCC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM8.640201 1,1_SKM9_640192,1_SKM9_640192,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},A7,iTru7_211_01,GCTTCTTG,iTru5_124_H,AACGCCTT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},1.SKM9.640192 1,blank_30_C10,blank_30_C10,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E19,iTru7_103_07,TGTGACTG,iTru5_06_C,AGCTACCA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C10 1,blank_30_C11,blank_30_C11,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E21,iTru7_103_08,CGAAGAAC,iTru5_07_C,AACCGAAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C11 1,blank_30_C12,blank_30_C12,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E23,iTru7_103_09,GGTGTCTT,iTru5_08_C,ATCGCAAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C12 1,blank_30_C4,blank_30_C4,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E7,iTru7_103_01,CAGCGATT,iTru5_12_B,ATCTGACC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C4 1,blank_30_C5,blank_30_C5,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E9,iTru7_103_02,TAGTGACC,iTru5_01_C,CACAGACT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C5 1,blank_30_C6,blank_30_C6,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E11,iTru7_103_03,CGAGACTA,iTru5_02_C,CACTGTAG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C6 1,blank_30_C7,blank_30_C7,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E13,iTru7_103_04,GACATGGT,iTru5_03_C,CACAGGAA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C7 1,blank_30_C8,blank_30_C8,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E15,iTru7_103_05,GCATGTCT,iTru5_04_C,CCATGAAC,New sequencing run \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},blank.30.C8 1,blank_30_C9,blank_30_C9,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},E17,iTru7_103_06,ACTCCATC,iTru5_05_C,GCCAATAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.C9 1,blank_30_D1,blank_30_D1,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G1,iTru7_103_10,AAGAAGGC,iTru5_09_C,GTTGCTGT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D1 1,blank_30_D10,blank_30_D10,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G19,iTru7_104_07,TTAGGTCG,iTru5_06_D,TCGACAAG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D10 1,blank_30_D11,blank_30_D11,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G21,iTru7_104_08,GCAAGATC,iTru5_07_D,GCTGAATC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D11 1,blank_30_D12,blank_30_D12,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G23,iTru7_104_09,AGAGCCTT,iTru5_08_D,AGTTGTGC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D12 1,blank_30_D2,blank_30_D2,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G3,iTru7_103_11,AGGTTCGA,iTru5_10_C,TCTAGTCC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D2 1,blank_30_D3,blank_30_D3,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G5,iTru7_103_12,CATGTTCC,iTru5_11_C,GACGAACT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D3 1,blank_30_D4,blank_30_D4,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G7,iTru7_104_01,GTGCCATA,iTru5_12_C,TTCGTACG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D4 1,blank_30_D5,blank_30_D5,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G9,iTru7_104_02,CCTTGTAG,iTru5_01_D,CGACACTT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D5 1,blank_30_D6,blank_30_D6,Test Shotgun Library \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},G11,iTru7_104_03,GCTGGATT,iTru5_02_D,AGACGCTA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D6 1,blank_30_D7,blank_30_D7,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G13,iTru7_104_04,TAACGAGG,iTru5_03_D,TGACAACC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D7 1,blank_30_D8,blank_30_D8,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G15,iTru7_104_05,ATGGTTGC,iTru5_04_D,GGTACTTC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D8 1,blank_30_D9,blank_30_D9,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},G17,iTru7_104_06,CCTATACC,iTru5_05_D,CTGTATGC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.D9 1,blank_30_E1,blank_30_E1,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I1,iTru7_104_10,GCAATGGA,iTru5_09_D,TGTCGACT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E1 1,blank_30_E10,blank_30_E10,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I19,iTru7_105_07,TGGCATGT,iTru5_06_E,TATGACCG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E10 1,blank_30_E11,blank_30_E11,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I21,iTru7_105_08,AGAAGCGT,iTru5_07_E,AGCTAGTG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E11 1,blank_30_E12,blank_30_E12,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I23,iTru7_105_09,AGCGGAAT,iTru5_08_E,GAACGAAG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E12 1,blank_30_E2,blank_30_E2,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I3,iTru7_104_11,CTGGAGTA,iTru5_10_D,AAGGCTCT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E2 1,blank_30_E3,blank_30_E3,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I5,iTru7_104_12,GAACATCG,iTru5_11_D,CCTAACAG,New sequencing run \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},blank.30.E3 1,blank_30_E4,blank_30_E4,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I7,iTru7_105_01,GCACAACT,iTru5_12_D,AAGACGAG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E4 1,blank_30_E5,blank_30_E5,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I9,iTru7_105_02,TTCTCTCG,iTru5_01_E,GACTTGTG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E5 1,blank_30_E6,blank_30_E6,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I11,iTru7_105_03,AACGGTCA,iTru5_02_E,CAACTCCA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E6 1,blank_30_E7,blank_30_E7,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I13,iTru7_105_04,ACAGACCT,iTru5_03_E,TGTTCCGT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E7 1,blank_30_E8,blank_30_E8,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I15,iTru7_105_05,TCTCTTCC,iTru5_04_E,ACCGCTAT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E8 1,blank_30_E9,blank_30_E9,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},I17,iTru7_105_06,AGTGTTGG,iTru5_05_E,CTTAGGAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.E9 1,blank_30_F1,blank_30_F1,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K1,iTru7_105_10,TAACCGGT,iTru5_09_E,CGTCTAAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F1 1,blank_30_F10,blank_30_F10,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K19,iTru7_106_07,CGTCTTGT,iTru5_06_F,AGCCAACT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F10 1,blank_30_F11,blank_30_F11,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K21,iTru7_106_08,CGTGATCA,iTru5_07_F,CTAGCTCA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F11 1,blank_30_F12,blank_30_F12,Test Shotgun Library \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},K23,iTru7_106_09,CCAAGTTG,iTru5_08_F,GGAAGAGA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F12 1,blank_30_F2,blank_30_F2,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K3,iTru7_105_11,CATGGAAC,iTru5_10_E,AACCAGAG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F2 1,blank_30_F3,blank_30_F3,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K5,iTru7_105_12,ATGGTCCA,iTru5_11_E,CGCCTTAT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F3 1,blank_30_F4,blank_30_F4,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K7,iTru7_106_01,CTTCTGAG,iTru5_12_E,CTCGTTCT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F4 1,blank_30_F5,blank_30_F5,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K9,iTru7_106_02,AACCGAAG,iTru5_01_F,GTGAGACT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F5 1,blank_30_F6,blank_30_F6,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K11,iTru7_106_03,TTCGTACC,iTru5_02_F,AACACGCT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F6 1,blank_30_F7,blank_30_F7,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K13,iTru7_106_04,CTGTTAGG,iTru5_03_F,CCTAGAGA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F7 1,blank_30_F8,blank_30_F8,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K15,iTru7_106_05,CACAAGTC,iTru5_04_F,TTCCAGGT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F8 1,blank_30_F9,blank_30_F9,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},K17,iTru7_106_06,TCTTGACG,iTru5_05_F,TCAGCCTT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.F9 1,blank_30_G1,blank_30_G1,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M1,iTru7_106_10,GTACCTTG,iTru5_09_F,AACACTGG,New sequencing run \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},blank.30.G1 1,blank_30_G10,blank_30_G10,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M19,iTru7_107_07,CCGACTAT,iTru5_06_G,GATCTTGC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G10 1,blank_30_G11,blank_30_G11,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M21,iTru7_107_08,AGCTAACC,iTru5_07_G,GTTAAGCG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G11 1,blank_30_G12,blank_30_G12,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M23,iTru7_107_09,GCCTTGTT,iTru5_08_G,GTCATCGT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G12 1,blank_30_G2,blank_30_G2,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M3,iTru7_106_11,GACTATGC,iTru5_10_F,ACTATCGC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G2 1,blank_30_G3,blank_30_G3,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M5,iTru7_106_12,TGGATCAC,iTru5_11_F,ACAACAGC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G3 1,blank_30_G4,blank_30_G4,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M7,iTru7_107_01,CTCTGGTT,iTru5_12_F,TGTGGCTT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G4 1,blank_30_G5,blank_30_G5,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M9,iTru7_107_02,GTTCATGG,iTru5_01_G,GTTCCATG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G5 1,blank_30_G6,blank_30_G6,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M11,iTru7_107_03,GCTGTAAG,iTru5_02_G,TGGATGGT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G6 1,blank_30_G7,blank_30_G7,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M13,iTru7_107_04,GTCGAAGA,iTru5_03_G,GCATAACG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G7 1,blank_30_G8,blank_30_G8,Test Shotgun Library \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},M15,iTru7_107_05,GAGCTCAA,iTru5_04_G,TCGAACCT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G8 1,blank_30_G9,blank_30_G9,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},M17,iTru7_107_06,TGAACCTG,iTru5_05_G,ACATGCCA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.G9 1,blank_30_H1,blank_30_H1,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O1,iTru7_107_10,AACTTGCC,iTru5_09_G,TCAGACAC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H1 1,blank_30_H10,blank_30_H10,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O19,iTru7_108_07,GAAGTACC,iTru5_06_H,CCTCGTTA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H10 1,blank_30_H11,blank_30_H11,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O21,iTru7_108_08,CAGGTATC,iTru5_07_H,CGATTGGA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H11 1,blank_30_H12,blank_30_H12,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O23,iTru7_108_09,TCTCTAGG,iTru5_08_H,CCAACGAA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H12 1,blank_30_H2,blank_30_H2,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O3,iTru7_107_11,CAATGTGG,iTru5_10_G,GTCCTAAG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H2 1,blank_30_H3,blank_30_H3,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O5,iTru7_107_12,AAGGCTGA,iTru5_11_G,AGACCTTG,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H3 1,blank_30_H4,blank_30_H4,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O7,iTru7_108_01,TTACCGAG,iTru5_12_G,AGACATGC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H4 1,blank_30_H5,blank_30_H5,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O9,iTru7_108_02,GTCCTAAG,iTru5_01_H,TAGCTGAG,New sequencing run \d{4}-\d{2}-\d{2} 
\d{2}:\d{2}:\d{2}.\d{6},blank.30.H5 1,blank_30_H6,blank_30_H6,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O11,iTru7_108_03,GAAGGTTC,iTru5_02_H,TTCGAAGC,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H6 1,blank_30_H7,blank_30_H7,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O13,iTru7_108_04,GAAGAGGT,iTru5_03_H,CAGTGCTT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H7 1,blank_30_H8,blank_30_H8,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O15,iTru7_108_05,TCTGAGAG,iTru5_04_H,TAGTGCCA,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H8 1,blank_30_H9,blank_30_H9,Test Shotgun Library \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},O17,iTru7_108_06,ACCGCATA,iTru5_05_H,GATGGAGT,New sequencing run \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{6},blank.30.H9""" # noqa if __name__ == '__main__': integration_tests() ```
[ { "content": "Here is the code content:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Import smtplib for the actual sending function\nimport sys, os\nimport smtplib\nimport email\n# Import the email modules we'll need\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIME...
[ { "content": "Here is the code content:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Import smtplib for the actual sending function\nimport sys, os\nimport smtplib\nimport email\n# Import the email modules we'll need\nfrom email.mime.text import MIMEText\nfrom email.mime.multi...
```python #!/usr/bin/env python # -*- coding: utf-8 -*- # Import smtplib for the actual sending function import sys, os import smtplib import email # Import the email modules we'll need from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.header import Header def sendEmail(fromEmail="", password=None, toEmails=[], smtp="smtp.gmail.com",\ port=25, msg=""): server = smtplib.SMTP(smtp, port) server.ehlo() if password!=None: server.starttls() server.login(fromEmail, password) server.sendmail(fromEmail, toEmails, msg.as_string()) server.close() if __name__=='__main__': pass """fromEmail = "noreply@malmo.se" #"johanlahti@gmail.com" password = None smtp = "mail2.malmo.se" port = 25 toEmails = ["johan.lahti@malmo.se"] subject = "Testar ÅÄÖ åäö" content = "ÅÄÖ åäö Nu testar jag skicka en länk...\n\n/Johan" msg = MIMEText(content, "plain", "utf-8") msg['Subject'] = subject msg['From'] = fromEmail msg['To'] = ";".join(toEmails) sendEmail(fromEmail, password, \ toEmails=toEmails, msg=msg, \ smtp=smtp, port=port)""" ```
[ { "content": "Here is the snippet:\n```python\n# copyright 2016 Siavoosh Payandeh Azad and Behrad Niazmand\nimport package_file\nimport copy\n\n\ndef extract_checker_info(name_string):\n package_file.list_of_detection_info_sa0[name_string] = []\n package_file.list_of_detection_info_sa1[name_string] = []\n...
[ { "content": "Here is the snippet:\n<|memory_start|>```python\n# copyright 2016 Siavoosh Payandeh Azad and Behrad Niazmand\nimport package_file\nimport copy\n\n\ndef extract_checker_info(name_string):\n package_file.list_of_detection_info_sa0[name_string] = []\n package_file.list_of_detection_info_sa1[nam...
```python # copyright 2016 Siavoosh Payandeh Azad and Behrad Niazmand import package_file import copy def extract_checker_info(name_string): package_file.list_of_detection_info_sa0[name_string] = [] package_file.list_of_detection_info_sa1[name_string] = [] package_file.list_of_true_misses_sa0[name_string] = [] package_file.list_of_true_misses_sa1[name_string] = [] area_report_file = open("coverage_results/fstat" + str(name_string), 'r') line = area_report_file.readline() while line != "": line = area_report_file.readline() if ".CHECKERS DETECTION INFO - amount of detections" in line: line = area_report_file.readline() for item in line.split(" "): if "|" in item: package_file.list_of_detection_info_sa0[name_string].append(item.split("|")[0]) package_file.list_of_detection_info_sa1[name_string].append(item.split("|")[1]) if "amount of True Misses" in line: line = area_report_file.readline() for item in line.split(" "): if "|" in item: package_file.list_of_true_misses_sa0[name_string].append(item.split("|")[0]) package_file.list_of_true_misses_sa1[name_string].append(item.split("|")[1]) # print package_file.list_of_detection_info_sa0 # print package_file.list_of_detection_info_sa1 return None def find_essential_checker(): print "------------------------------------------------------------------------------------------------" print " Extracting essential checkers" print "------------------------------------------------------------------------------------------------" temp_copy_sa0 = copy.deepcopy(package_file.list_of_true_misses_sa0) temp_copy_sa1 = copy.deepcopy(package_file.list_of_true_misses_sa1) random_item = temp_copy_sa0.keys()[0] selected_checkers_sa0 = [] selected_checkers_sa1 = [] checkers_for_optimization = [] for node in range(0, len(temp_copy_sa0[random_item])): best_checker = None best_true_miss_rate = float('inf') for checker in temp_copy_sa0: true_miss_rate = int(temp_copy_sa0[checker][node]) if 
int(package_file.list_of_detection_info_sa0[str(checker)][node]) > 0: if true_miss_rate >= 0: if true_miss_rate < best_true_miss_rate: best_true_miss_rate = true_miss_rate best_checker = checker # if best_true_miss_rate == 0: count = 0 for checker in temp_copy_sa0: if int(package_file.list_of_true_misses_sa0[checker][node]) == best_true_miss_rate: if int(package_file.list_of_detection_info_sa0[str(checker)][node]) > 0: temp_copy_sa0[checker][node] = 1 count += 1 else: temp_copy_sa0[checker][node] = 0 else: temp_copy_sa0[checker][node] = 0 if count == 1: if best_checker not in selected_checkers_sa0: selected_checkers_sa0.append(best_checker) # else: # for checker in temp_copy_sa0: # temp_copy_sa0[checker][node] = 0 print "single dominant checkers for sta0:", selected_checkers_sa0 for node in range(0, len(temp_copy_sa1[random_item])): best_checker = None best_true_miss_rate = float('inf') for checker in temp_copy_sa1: true_miss_rate = int(temp_copy_sa1[checker][node]) if int(package_file.list_of_detection_info_sa1[str(checker)][node]) > 0: # print checker, int(package_file.list_of_detection_info_sa1[str(checker)][node]) if true_miss_rate >= 0: if true_miss_rate < best_true_miss_rate: best_true_miss_rate = true_miss_rate best_checker = checker # if best_true_miss_rate == 0: count = 0 for checker in temp_copy_sa1: if int(package_file.list_of_true_misses_sa1[checker][node]) == best_true_miss_rate: if int(package_file.list_of_detection_info_sa1[str(checker)][node]) > 0: temp_copy_sa1[checker][node] = 1 count += 1 else: temp_copy_sa1[checker][node] = 0 else: temp_copy_sa1[checker][node] = 0 # print "best checker", best_checker if count == 1: if best_checker not in selected_checkers_sa1: selected_checkers_sa1.append(best_checker) # else: # for checker in temp_copy_sa1: # temp_copy_sa1[checker][node] = 0 print "single dominant checkers for sta1:", selected_checkers_sa1 for checker in selected_checkers_sa0: for node in range(0, len(temp_copy_sa0[checker])): if 
temp_copy_sa0[checker][node] == 1: for checker2 in temp_copy_sa0.keys(): if checker2 not in selected_checkers_sa0: if temp_copy_sa0[checker2][node] == 1: temp_copy_sa0[checker2][node] = 0 for checker in selected_checkers_sa1: for node in range(0, len(temp_copy_sa1[checker])): if temp_copy_sa1[checker][node] == 1: for checker2 in temp_copy_sa1.keys(): if checker2 not in selected_checkers_sa1: if temp_copy_sa1[checker2][node] == 1: temp_copy_sa1[checker2][node] = 0 if package_file.debug: print "-----------------" print "printing the checkers true misses table (0-1)" print "stuck at 0:" for checker in sorted(temp_copy_sa0.keys()): print checker, for item in temp_copy_sa0[checker]: print item, print "" print "-----------------" print "printing the checkers true misses table (0-1)" print "stuck at 1:" for checker in sorted(temp_copy_sa1.keys()): print checker, for item in temp_copy_sa1[checker]: print item, print "" print "-----------------" final_selected_list = [] for item in selected_checkers_sa0: final_selected_list.append(str(item)) for item in selected_checkers_sa1: if item not in selected_checkers_sa0: final_selected_list.append(str(item)) for item in temp_copy_sa0.keys(): if str(item) not in final_selected_list: if str(item) not in checkers_for_optimization: checkers_for_optimization.append(str(item)) print "selected single dominant checkers:", final_selected_list print "selected checkers for optimization:", checkers_for_optimization return final_selected_list, checkers_for_optimization ```