| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
"""JSON-L parser plugin for Azure application gateway access log files."""
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.parsers import jsonl_parser
from plaso.parsers.jsonl_plugins import interface
class AzureApplicationGatewayAccessEventData(events.EventData):
"""Azure application gateway access log event data.
Attributes:
client_ip (str): Client IP address of the request.
client_port (int): Client TCP/UDP port for the request.
client_response_time (int): Duration, in seconds, from the first byte of
a client request to be processed up to the first byte sent as response
to the client.
host (str): Address listed in the host header of the request. If rewritten
using header rewrite, contains the updated host name.
http_method (str): HTTP method used by the request.
http_status (int): HTTP status code returned to the client from application
gateway.
http_version (str): HTTP version of the request.
instance_identifier (str): Application gateway instance that served
the request.
original_host (str): Original request host name.
original_request_uri (str): Original request URL, including arguments.
received_bytes (int): Size of packet received, in bytes.
recorded_time (dfdatetime.DateTimeValues): date and time the log entry
was recorded.
request_query (str): Server-Routed: Back-end pool instance that was sent
the request. X-AzureApplicationGateway-LOG-ID: Correlation ID used for
the request. It can be used to troubleshoot traffic issues on
the back-end servers. SERVER-STATUS: HTTP response code that application
gateway received from the back-end.
request_uri (str): URI of the received request.
sent_bytes (int): Size of packet sent, in bytes.
server_response_latency (str): Latency of the response (in seconds) from
the back-end server.
server_routed (str): The back-end server that application gateway routes
the request to.
server_status (str): HTTP status code of the back-end server.
ssl_cipher (str): Cipher suite being used for TLS communication.
ssl_client_certificate_fingerprint (str): Fingerprint of the SSL client
certificate.
ssl_client_certificate_issuer_name (str): Name of the issuer of the SSL
client certificate.
ssl_client_verify (str): TODO.
ssl_enabled (str): Whether communication to the back-end pools used TLS.
Valid values are on and off.
ssl_protocol (str): The SSL/TLS protocol used.
time_taken (double): Duration, in seconds, that it takes for the first byte
of a client request to be processed and its last-byte sent in
the response to the client. It's important to note that the Time-Taken
field usually includes the time that the request and response packets
are traveling over the network.
transaction_identifier (str): Unique identifier to correlate the request
received from the client.
user_agent (str): User agent from the HTTP request header.
waf_evaluation_time (str): Duration, in seconds, that it takes for
the request to be processed by the WAF.
waf_mode (str): Value can be either Detection or Prevention.
"""
DATA_TYPE = 'azure:application_gateway_access:entry'
def __init__(self):
"""Initializes event data."""
super(AzureApplicationGatewayAccessEventData, self).__init__(
data_type=self.DATA_TYPE)
self.client_ip = None
self.client_port = None
self.client_response_time = None
self.host = None
self.http_method = None
self.http_status = None
self.http_version = None
self.instance_identifier = None
self.original_host = None
self.original_request_uri = None
self.received_bytes = None
self.recorded_time = None
self.request_query = None
self.request_uri = None
self.sent_bytes = None
self.server_response_latency = None
self.server_routed = None
self.server_status = None
self.ssl_cipher = None
self.ssl_client_certificate_fingerprint = None
self.ssl_client_certificate_issuer_name = None
self.ssl_client_verify = None
self.ssl_enabled = None
self.ssl_protocol = None
self.time_taken = None
self.transaction_identifier = None
self.user_agent = None
self.waf_evaluation_time = None
self.waf_mode = None
class AzureApplicationGatewayAccessLogJSONLPlugin(interface.JSONLPlugin):
"""JSON-L parser plugin for Azure application gateway access log files."""
NAME = 'azure_application_gateway_access_log'
DATA_FORMAT = 'Azure Application Gateway access log'
def _ParseRecord(self, parser_mediator, json_dict):
"""Parses an Azure application gateway access log record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
json_dict (dict): JSON dictionary of the log record.
"""
properties_json_dict = self._GetJSONValue(
json_dict, 'properties', default_value={})
event_data = AzureApplicationGatewayAccessEventData()
event_data.client_ip = self._GetJSONValue(
properties_json_dict, 'clientIP')
event_data.client_port = self._GetJSONValue(
properties_json_dict, 'clientPort')
event_data.client_response_time = self._GetJSONValue(
properties_json_dict, 'clientResponseTime')
event_data.host = self._GetJSONValue(
properties_json_dict, 'host')
event_data.http_method = self._GetJSONValue(
properties_json_dict, 'httpMethod')
event_data.http_status = self._GetJSONValue(
properties_json_dict, 'httpStatus')
event_data.http_version = self._GetJSONValue(
properties_json_dict, 'httpVersion')
event_data.instance_identifier = self._GetJSONValue(
properties_json_dict, 'instanceId')
event_data.original_host = self._GetJSONValue(
properties_json_dict, 'originalHost')
event_data.original_request_uri = self._GetJSONValue(
properties_json_dict, 'originalRequestUriWithArgs')
event_data.received_bytes = self._GetJSONValue(
properties_json_dict, 'receivedBytes')
event_data.recorded_time = self._ParseISO8601DateTimeString(
parser_mediator, json_dict, 'timeStamp')
event_data.request_query = self._GetJSONValue(
properties_json_dict, 'requestQuery')
event_data.request_uri = self._GetJSONValue(
properties_json_dict, 'requestUri')
event_data.sent_bytes = self._GetJSONValue(
properties_json_dict, 'sentBytes')
event_data.server_response_latency = self._GetJSONValue(
properties_json_dict, 'serverResponseLatency')
event_data.server_routed = self._GetJSONValue(
properties_json_dict, 'serverRouted')
event_data.server_status = self._GetJSONValue(
properties_json_dict, 'serverStatus')
event_data.ssl_cipher = self._GetJSONValue(
properties_json_dict, 'sslCipher')
event_data.ssl_client_certificate_fingerprint = self._GetJSONValue(
properties_json_dict, 'sslClientCertificateFingerprint')
event_data.ssl_client_certificate_issuer_name = self._GetJSONValue(
properties_json_dict, 'sslClientCertificateIssuerName')
event_data.ssl_client_verify = self._GetJSONValue(
properties_json_dict, 'sslClientVerify')
event_data.ssl_enabled = self._GetJSONValue(
properties_json_dict, 'sslEnabled')
event_data.ssl_protocol = self._GetJSONValue(
properties_json_dict, 'sslProtocol')
event_data.time_taken = self._GetJSONValue(
properties_json_dict, 'timeTaken')
event_data.transaction_identifier = self._GetJSONValue(
properties_json_dict, 'transactionId')
event_data.user_agent = self._GetJSONValue(
properties_json_dict, 'userAgent')
event_data.waf_evaluation_time = self._GetJSONValue(
properties_json_dict, 'WAFEvaluationTime')
event_data.waf_mode = self._GetJSONValue(
properties_json_dict, 'WAFMode')
parser_mediator.ProduceEventData(event_data)
def CheckRequiredFormat(self, json_dict):
"""Check if the log record has the minimal structure required by the plugin.
Args:
json_dict (dict): JSON dictionary of the log record.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
operation_name = self._GetJSONValue(json_dict, 'operationName')
properties = self._GetJSONValue(json_dict, 'properties')
iso8601_string = self._GetJSONValue(json_dict, 'timeStamp')
if (None in (operation_name, properties, iso8601_string) or
operation_name != 'ApplicationGatewayAccess'):
return False
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
try:
date_time.CopyFromStringISO8601(iso8601_string)
except ValueError:
return False
return True
jsonl_parser.JSONLParser.RegisterPlugin(
AzureApplicationGatewayAccessLogJSONLPlugin)
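# A minimal sketch of the kind of JSON-L record this plugin accepts. The keys
# mirror those read by _ParseRecord and CheckRequiredFormat above; the values
# are invented for illustration only.
#
#   {"operationName": "ApplicationGatewayAccess",
#    "timeStamp": "2022-07-24T00:00:00+00:00",
#    "properties": {"clientIP": "192.0.2.1", "clientPort": 45320,
#                   "httpMethod": "GET", "httpStatus": 200,
#                   "requestUri": "/index.html", "userAgent": "curl/7.74.0"}}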
|
{
"content_hash": "c5c927c920f1ad3d6cfb0595870f3cf8",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 80,
"avg_line_length": 43.17142857142857,
"alnum_prop": 0.7080300022060446,
"repo_name": "log2timeline/plaso",
"id": "f0715908d84f226ee58199479c5a77c87b8c6886",
"size": "9090",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "plaso/parsers/jsonl_plugins/azure_application_gateway_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4301"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "5345186"
},
{
"name": "Shell",
"bytes": "27279"
},
{
"name": "YARA",
"bytes": "507"
}
],
"symlink_target": ""
}
|
import sys
from mpi4py import MPI
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
def set_parameters_ml():
"""
Helper routine to set parameters for the following multi-level runs
Returns:
dict: dictionary containing the simulation parameters
dict: dictionary containing the controller parameters
float: starting time
float: end time
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 5e-10
level_params['dt'] = 0.125 / 2.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [5]
sweeper_params['QI'] = 'LU'
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.1 # diffusion coefficient
problem_params['freq'] = (2, 2) # frequency for the test value
problem_params['bc'] = 'periodic' # periodic BCs
problem_params['nvars'] = [(256, 256), (128, 128)] # number of degrees of freedom for each level
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 6
space_transfer_params['periodic'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 1.0
return description, controller_params, t0, Tend
if __name__ == "__main__":
"""
A simple test program to do MPI-parallel PFASST runs
"""
# set MPI communicator
comm = MPI.COMM_WORLD
# get parameters from Part A
description, controller_params, t0, Tend = set_parameters_ml()
controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main functions to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# combine statistics into list of statistics
iter_counts_list = comm.gather(iter_counts, root=0)
rank = comm.Get_rank()
size = comm.Get_size()
if rank == 0:
# we'd need to deal with variable file names here (for testing purpose only)
if len(sys.argv) >= 3:
fname = sys.argv[2]
else:
fname = 'step_6_B_out.txt'
f = open(fname, 'a')
out = 'Working with %2i processes...' % size
f.write(out + '\n')
print(out)
# compute exact solutions and compare with both results
uex = P.u_exact(Tend)
err = abs(uex - uend)
out = 'Error classic: %12.8e' % err
f.write(out + '\n')
print(out)
# build one list of statistics instead of a list of lists, then sort by time
iter_counts_gather = [item for sublist in iter_counts_list for item in sublist]
iter_counts = sorted(iter_counts_gather, key=lambda tup: tup[0])
# compute and print statistics
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %1i ' % (item[0], item[1])
f.write(out + '\n')
print(out)
f.write('\n')
print()
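# Illustrative launch command (a sketch, not part of the original script): the
# controller distributes time steps across MPI ranks, so the script is started
# through an MPI launcher, e.g.
#
#   mpirun -np 4 python playground_parallelization.py
#
# An output file name can be supplied as the second command-line argument;
# otherwise rank 0 appends the iteration counts to step_6_B_out.txt.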
|
{
"content_hash": "5da7f4bd9215755cd9ca9db3c3c58cc9",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 104,
"avg_line_length": 34.097744360902254,
"alnum_prop": 0.6584343991179713,
"repo_name": "Parallel-in-Time/pySDC",
"id": "0ac55bcaf8eee380276818e288fd366ce49dc04e",
"size": "4535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySDC/playgrounds/parallel/playground_parallelization.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4264000"
},
{
"name": "Python",
"bytes": "2450453"
},
{
"name": "Shell",
"bytes": "18105"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Idiorm'
copyright = u'2014, Jamie Matthews and Simon Holywell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Idiormdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Idiorm.tex', u'Idiorm Documentation',
u'Jamie Matthews and Simon Holywell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'idiorm', u'Idiorm Documentation',
[u'Jamie Matthews and Simon Holywell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Idiorm', u'Idiorm Documentation',
u'Jamie Matthews and Simon Holywell', 'Idiorm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
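# Illustrative build command (a sketch, assuming this conf.py sits in the
# documentation source directory): the HTML docs can be generated with
#
#   sphinx-build -b html . _build/html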
|
{
"content_hash": "e0e3bdd416f1143fd44e12f923e52390",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 86,
"avg_line_length": 32.19650655021834,
"alnum_prop": 0.7022921470229214,
"repo_name": "Prezto/Shopr",
"id": "87e17925ff5aff4f4d8c289a2c9409ca63ea40be",
"size": "7790",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "system/vendor/j4mie/idiorm/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "354831"
},
{
"name": "HTML",
"bytes": "6675"
},
{
"name": "JavaScript",
"bytes": "257662"
},
{
"name": "PHP",
"bytes": "21679"
}
],
"symlink_target": ""
}
|
import collections
import copy
import os
import random
import shutil
import abc
import numpy as np
import librosa
from spych import data
from spych.audio import signal
from spych.utils import naming
class DatasetBase(metaclass=abc.ABCMeta):
"""
Defines the base interface for an audio dataset.
"""
@property
@abc.abstractmethod
def name(self):
""" Return the name of the dataset (Equals basename of the path, if not None). """
return "undefined"
@property
@abc.abstractmethod
def files(self):
""" Return a dictionary containing file-objs with the file-id as key. """
return {}
@property
@abc.abstractmethod
def utterances(self):
""" Return a dictionary containing utterance-objs with the utterance-id as key. """
return {}
@property
@abc.abstractmethod
def segmentations(self):
""" Return a dictionary of dictionaries containing segmentation key/obj with the utterance-id as key. """
return {}
@property
@abc.abstractmethod
def speakers(self):
""" Return a dictionary containing speaker-objs with the speaker-id as key. """
return {}
@property
@abc.abstractmethod
def features(self):
""" Return a dictionary containing feature-containers with the feature-name as key. """
return {}
#
# Files
#
@property
def num_files(self):
""" Return number of files. """
return len(self.files)
def add_random_noise(self, snr=None, snr_range=None):
"""
Adds generated noise to all files in the dataset with the given SNR.
:param snr: Signal-to-Noise-Ratio [dB]
:param snr_range: Uses a random Signal-to-Noise-Ratio [dB] in the given range (start,end)
"""
for file in self.files.values():
used_snr = snr
if snr_range is not None:
used_snr = random.randint(snr_range[0], snr_range[1])
signal.add_random_noise_to_wav(file.path, file.path, snr=used_snr)
#
# Utterances
#
@property
def num_utterances(self):
""" Return number of utterances. """
return len(self.utterances)
def utterances_in_file(self, file_idx):
""" Return all utterances that are in the given file. """
utterances = set()
for utt in self.utterances.values():
if utt.file_idx == file_idx:
utterances.add(utt)
return utterances
def utterances_of_speaker(self, speaker_idx):
""" Returns all utterances of the given speaker. """
utterances = set()
for utt in self.utterances.values():
if utt.speaker_idx == speaker_idx:
utterances.add(utt)
return utterances
def speaker_to_utterance_dict(self):
""" Return a dict with speaker to utterances mapping. """
spk2utt = collections.defaultdict(list)
for utt in self.utterances.values():
spk = self.speakers[utt.speaker_idx]
spk2utt[spk].append(utt)
return spk2utt
def speaker_of_utterance(self, utt_idx):
""" Return the speaker of the given utterance. """
if isinstance(utt_idx, data.Utterance):
return self.speakers[utt_idx.speaker_idx]
else:
return self.speakers[self.utterances[utt_idx].speaker_idx]
def read_utterance_data(self, utterance_idx, without_start_end_silence=False, word_alignment_key=None, dtype=np.float32):
"""
Read the audio signal for the given utterance. This uses librosa.core.load.
:param utterance_idx: Utterance-Id to read signal for.
:param without_start_end_silence: If True tries to cut off start and end silence based on word alignment.
:param word_alignment_key: Key of the segmentation with the word alignment for silence cutoff.
:return: tuple (nd-array samples, sampling-rate)
"""
utt = self.utterances[utterance_idx]
file_path = self.files[utt.file_idx].path
start = utt.start
end = utt.end
if without_start_end_silence:
if word_alignment_key is None:
raise ValueError('You have to provide a key pointing to the word alignment segmentations to read without start/end silences.')
seg = self.segmentations[utterance_idx][word_alignment_key]
start += seg.first_segment.start
end = utt.start + seg.last_segment.end
if end != data.Utterance.END_FULL_FILE:
samples, sampling_rate = librosa.core.load(file_path, sr=None, offset=start, duration=end - start, dtype=dtype)
else:
samples, sampling_rate = librosa.core.load(file_path, sr=None, offset=start, dtype=dtype)
return samples, sampling_rate
#
# Speakers
#
@property
def num_speakers(self):
""" Return the number of speakers in the dataset. """
return len(self.speakers)
#
# Segmentations
#
@property
def all_segmentation_keys(self):
""" Return a set of all occuring segmentation keys. """
keys = set()
for utt_idx, segmentations in self.segmentations.items():
keys.update(segmentations.keys())
return keys
def all_segmentations_with_key(self, key):
""" Return a set of all occurring segmentations with the given key. """
raise NotImplementedError('Not yet implemented!')
def num_segmentations_for_utterance(self, utterance_idx):
""" Return the number of segmentations, the given utterance contains. """
return len(self.segmentations[utterance_idx])
#
# Features
#
def get_features(self, utterance_idx, feature_container):
""" Return the features (np array) for the given utterance of the given container. """
if feature_container in self.features.keys():
with self.features[feature_container] as fc:
return fc.get(utterance_idx)
class Dataset(DatasetBase):
"""
Represents an audio dataset.
Notes on paths:
All paths stored in the dataset object (audio files, features) are absolute.
:param path: Path to a directory on the filesystem, which acts as root folder for the dataset.
If no path is given the dataset cannot be saved on disk.
:param loader: This object is used to save the dataset. By default :class:`spych.data.dataset.io.SpychDatasetLoader` is used.
:type loader: :class:`spych.data.dataset.io.DatasetLoader`
"""
_default_file_folder = 'audio_files'
def __init__(self, path=None, loader=None):
self.path = path
if loader is None:
from spych.data.dataset.io import spych
self.loader = spych.SpychDatasetLoader()
else:
self.loader = loader
self._files = {}
self._utterances = {}
self._segmentations = collections.defaultdict(dict)
self._speakers = {}
self.subviews = {}
self._features = {}
@property
def files(self):
return self._files
@property
def utterances(self):
return self._utterances
@property
def segmentations(self):
return self._segmentations
@property
def speakers(self):
return self._speakers
@property
def features(self):
return self._features
@property
def name(self):
"""
Get the name of the dataset (Equals basename of the path, if not None.)
:return: name
"""
if self.path is None:
return "undefined"
else:
return os.path.basename(os.path.abspath(self.path))
def save(self):
""" Save this dataset at self.path. """
if self.path is None:
raise ValueError('No path given to save the dataset.')
self.save_at(self.path)
def save_at(self, path, loader=None, copy_files=False):
"""
Save this dataset at the given path. If the path differs from the current path set, the path gets updated.
:param path: Path to save the dataset to.
:param loader: If you want to use another loader (e.g. to export to another format). Otherwise it uses the loader associated with this dataset.
:param copy_files: If true the files are also stored in the new path, if not already there.
"""
if loader is None:
self.loader.save(self, path, copy_files=copy_files)
elif type(loader) == str:
from ..dataset import io
loader = io.create_loader_of_type(loader)
loader.save(self, path, copy_files=copy_files)
else:
loader.save(self, path, copy_files=copy_files)
self.path = path
@classmethod
def load(cls, path, loader=None):
""" Loads the dataset from the given path, using the given loader. If no loader is given the spych loader is used. """
if loader is None:
from ..dataset import io
loader = io.SpychDatasetLoader()
elif type(loader) == str:
from ..dataset import io
loader = io.create_loader_of_type(loader)
return loader.load(path)
#
# Subview
#
@property
def num_subviews(self):
""" Return number of subviews. """
return len(self.subviews)
def add_subview(self, name, subview):
""" Add the subview to this dataset. """
subview.dataset = self
self.subviews[name] = subview
def export_subview(self, name):
""" Return a subview as a standalone dataset. """
sv = self.subviews[name]
exported_set = Dataset(path=self.path)
exported_set._files = copy.deepcopy(sv.files)
exported_set._utterances = copy.deepcopy(sv.utterances)
exported_set._speakers = copy.deepcopy(sv.speakers)
exported_set._segmentations = copy.deepcopy(sv.segmentations)
exported_set._features = copy.deepcopy(sv.features)
return exported_set
#
# File
#
def import_file(self, file, copy_file=False):
""" Import a copy the given file and return the new file obj. """
return self.add_file(file.path, file_idx=file.idx, copy_file=copy_file)
def add_file(self, path, file_idx=None, copy_file=False):
"""
Adds a new file to the dataset.
:param path: Path of the file to add.
:param file_idx: The id to associate the file with. If None or already exists, one is generated.
:param copy_file: If True the file is copied to the dataset folder, otherwise the given path is used directly.
:return: File object
"""
if copy_file and not os.path.isdir(self.path):
raise ValueError('No path defined for this dataset, cannot copy files.')
if file_idx is None or file_idx in self.files.keys():
final_file_idx = naming.generate_name(length=15, not_in=self.files.keys())
else:
final_file_idx = file_idx
if copy_file:
file_folder = os.path.join(self.path, self._default_file_folder)
basename = os.path.basename(path)
final_file_path = os.path.join(self._default_file_folder, basename)
full_path = os.path.join(self.path, final_file_path)
while os.path.exists(full_path):
final_file_path = os.path.join(self._default_file_folder, '{}.wav'.format(naming.generate_name(15)))
full_path = os.path.abspath(os.path.join(self.path, final_file_path))
if not os.path.isdir(file_folder):
os.makedirs(file_folder)
shutil.copy(path, full_path)
else:
if os.path.isabs(path):
final_file_path = path
else:
final_file_path = os.path.abspath(path)
file_obj = data.File(final_file_idx, final_file_path)
self.files[final_file_idx] = file_obj
return file_obj
def remove_files(self, file_ids, delete_files=False):
"""
Removes the given files from the dataset.
:param file_ids: List of file ids or file objects
:param delete_files: If True, also delete the underlying files from disk
"""
for file_idx in file_ids:
if type(file_idx) == data.File:
file_obj = file_idx
else:
file_obj = self.files[file_idx]
if delete_files:
path = os.path.join(self.path, file_obj.path)
if os.path.exists(path):
os.remove(path)
self.remove_utterances(self.utterances_in_file(file_idx))
del self.files[file_obj.idx]
#
# Utterance
#
def import_utterance(self, utterance):
""" Import a copy of the given utterance and return the new utterance. """
return self.add_utterance(utterance.file_idx,
utterance_idx=utterance.idx,
speaker_idx=utterance.speaker_idx,
start=utterance.start,
end=utterance.end)
def add_utterance(self, file_idx, utterance_idx=None, speaker_idx=None, start=0, end=-1):
"""
Adds a new utterance to the dataset.
:param file_idx: The file id the utterance is in.
:param utterance_idx: The id to associate with the utterance. If None or already exists, one is generated.
:param speaker_idx: The speaker id to associate with the utterance.
:param start: Start of the utterance within the file [seconds].
:param end: End of the utterance within the file [seconds]. -1 equals the end of the file.
:return: Utterance object
"""
if file_idx is None or file_idx.strip() == '':
raise ValueError('No file id given. The utterance has to be associated with a file!')
if file_idx not in self.files.keys():
raise ValueError('File with id {} does not exist!'.format(file_idx))
if speaker_idx is not None and speaker_idx not in self.speakers.keys():
raise ValueError('Speaker with id {} does not exist!'.format(speaker_idx))
if utterance_idx is None or utterance_idx in self.utterances.keys():
final_utterance_idx = naming.generate_name(length=15, not_in=self.utterances.keys())
else:
final_utterance_idx = utterance_idx
utt = data.Utterance(final_utterance_idx, file_idx, speaker_idx=speaker_idx, start=start, end=end)
self.utterances[final_utterance_idx] = utt
return utt
def remove_utterances(self, utterance_ids):
"""
Removes the given utterances by id.
:param utterance_ids: List of utterance ids
"""
for utt_id in utterance_ids:
if type(utt_id) == data.Utterance:
utt = utt_id
else:
utt = self.utterances[utt_id]
if utt.idx in self.utterances.keys():
del self.utterances[utt.idx]
if utt.idx in self.segmentations.keys():
del self.segmentations[utt.idx]
#
# Speaker
#
def import_speaker(self, speaker):
""" Import a copy of the given speaker and return the new speaker. """
spk = self.add_speaker(speaker_idx=speaker.idx, gender=speaker.gender)
spk.load_speaker_info_from_dict(speaker.get_speaker_info_dict())
return spk
def add_speaker(self, speaker_idx=None, gender=None):
"""
Adds a new speaker to the dataset.
:param speaker_idx: The id to associate the speaker with. If None or already exists, one is generated.
:param gender: Gender of the speaker.
:return: Speaker object
"""
if speaker_idx is None or speaker_idx in self.speakers.keys():
final_speaker_idx = naming.generate_name(length=15, not_in=self.speakers.keys())
else:
final_speaker_idx = speaker_idx
spk = data.Speaker(final_speaker_idx, gender=gender)
self.speakers[final_speaker_idx] = spk
return spk
#
# Segmentation
#
def import_segmentation(self, segmentation):
""" Import a copy of the given segmentation and return the new segmentation. """
return self.add_segmentation(segmentation.utterance_idx, segments=copy.deepcopy(segmentation.segments), key=segmentation.key)
def add_segmentation(self, utterance_idx, segments=None, key=None):
"""
Adds a new segmentation.
:param utterance_idx: Utterance id the segmentation is associated with.
:param segments: Segments can be a string (will be space separated into tokens) or a list of segments.
:param key: A key this segmentation is associated with. (If None the default key is used.)
:return: Segmentation object
"""
if utterance_idx is None or utterance_idx.strip() == '':
raise ValueError('No utterance id given. The segmentation has to be associated with an utterance!')
if utterance_idx not in self.utterances.keys():
raise ValueError('Utterance with id {} does not exist!'.format(utterance_idx))
if type(segments) == str:
segmentation_obj = data.Segmentation.from_text(segments, utterance_idx=utterance_idx, key=key)
elif type(segments) == list:
segmentation_obj = data.Segmentation(segments=segments, utterance_idx=utterance_idx, key=key)
else:
return None
self.segmentations[utterance_idx][segmentation_obj.key] = segmentation_obj
return segmentation_obj
def import_segmentation(self, segmentation):
""" Adds an existing segmentation to the dataset. Uses key and utterance-id from the segmentation object. """
if segmentation.utterance_idx is None or segmentation.utterance_idx.strip() == '':
raise ValueError('No utterance id given. The segmentation has to be associated with an utterance!')
if segmentation.utterance_idx not in self.utterances.keys():
raise ValueError('Utterance with id {} does not exist!'.format(segmentation.utterance_idx))
self.segmentations[segmentation.utterance_idx][segmentation.key] = segmentation
return segmentation
#
# FEATURES
#
def create_feature_container(self, name, path=None):
""" Create a new feature container """
if name in self.features.keys():
raise ValueError('Feature container with name {} already exists.'.format(name))
if path is None:
path = 'features_{}'.format(name)
if os.path.isabs(path):
final_feature_path = path
else:
final_feature_path = os.path.join(self.path, path)
fc = data.FeatureContainer(final_feature_path)
self.features[name] = fc
return fc
def add_features(self, utterance_idx, feature_matrix, feature_container):
"""
Adds the given features to the dataset. Features are stored directly to the filesystem, so this dataset has to have a path set.
:param utterance_idx: Utterance to which the features correspond.
:param feature_matrix: A numpy array containing the features.
:param feature_container: Name of the container to store the features in.
"""
if feature_container not in self.features.keys():
raise ValueError('No feature container with name {} exists.'.format(feature_container))
if utterance_idx is None or utterance_idx.strip() == '':
raise ValueError('No utterance id given. The features have to be associated with an utterance!')
if utterance_idx not in self.utterances.keys():
raise ValueError('Utterance with id {} does not exist!'.format(utterance_idx))
with self.features[feature_container] as fc:
fc.add(utterance_idx, feature_matrix)
def generate_features(self, feature_pipeline, target_feature_name, source_feature_name=None):
"""
Creates new feature container with features generated with the given pipeline.
If source_feature_name is not given the pipeline needs an extraction stage.
"""
target_fc = self.create_feature_container(target_feature_name)
target_fc.open()
source_fc = None
if source_feature_name is not None:
source_fc = self.features[source_feature_name]
source_fc.open()
for utterance_id in self.utterances.keys():
if source_feature_name is not None:
output = feature_pipeline.process(source_fc.get(utterance_id))
else:
samples, sr = self.read_utterance_data(utterance_id)
output = feature_pipeline.process_signal(samples, sr)
target_fc.add(utterance_id, output)
#
# DIV
#
def import_dataset(self, import_dataset, copy_files=False):
"""
Merges the given dataset into this dataset.
:param import_dataset: Dataset to merge
:param copy_files: If True copies the audio files into this dataset's folder.
"""
if copy_files and not os.path.isdir(self.path):
raise ValueError('No path defined for this dataset, cannot copy files.')
file_idx_mapping = {}
for file_idx, file_to_import in import_dataset.files.items():
imported_file = self.add_file(os.path.abspath(os.path.join(import_dataset.path, file_to_import.path)), file_idx=file_idx,
copy_file=copy_files)
file_idx_mapping[file_idx] = imported_file.idx
speaker_idx_mapping = {}
for speaker_idx, speaker_to_import in import_dataset.speakers.items():
imported_speaker = self.add_speaker(speaker_idx=speaker_idx, gender=speaker_to_import.gender)
speaker_idx_mapping[speaker_idx] = imported_speaker.idx
utt_idx_mapping = {}
for utt_id, utterance_to_import in import_dataset.utterances.items():
import_file_idx = file_idx_mapping[utterance_to_import.file_idx]
import_speaker_idx = speaker_idx_mapping[utterance_to_import.speaker_idx]
imported_utterance = self.add_utterance(file_idx=import_file_idx,
utterance_idx=utt_id,
speaker_idx=import_speaker_idx,
start=utterance_to_import.start,
end=utterance_to_import.end)
utt_idx_mapping[utt_id] = imported_utterance.idx
for utt_id, keys in import_dataset.segmentations.items():
for key, seg in keys.items():
import_utt_id = utt_idx_mapping[utt_id]
self.add_segmentation(import_utt_id, segments=seg.segments, key=key)
#
# MODIFICATIONS
#
def subdivide_speakers(self, target_number_of_speakers):
"""
Subdivide the existing speakers in the dataset into additional speakers so that the total number of speakers equals target_number_of_speakers.
:param target_number_of_speakers: Target number of speakers
"""
if self.num_speakers >= target_number_of_speakers:
print("Number of speakers already greater or equal to {}.".format(target_number_of_speakers))
return
spk2utt = self.speaker_to_utterance_dict()
spk2utt_count = {speaker_id: len(utterances) for speaker_id, utterances in spk2utt.items()}
utt_count = sum(spk2utt_count.values())
target_num_utts_per_speaker = int(utt_count / target_number_of_speakers)
# at least one part per speaker
spk2num_parts = {speaker_id: 1 for speaker_id, utt_count in spk2utt_count.items()}
spk2utt_count_intermediate = {speaker_id: utt_count - target_num_utts_per_speaker for speaker_id, utt_count in spk2utt_count.items()}
num_assigned_parts = len(spk2num_parts)
for i in range(num_assigned_parts, target_number_of_speakers):
sorted_spk2utt_count = sorted(spk2utt_count_intermediate.items(), key=lambda t: t[1], reverse=True)
spk2utt_count_intermediate[sorted_spk2utt_count[0][0]] -= target_num_utts_per_speaker
spk2num_parts[sorted_spk2utt_count[0][0]] += 1
for speaker_id, num_parts in spk2num_parts.items():
num_utts = spk2utt_count[speaker_id]
num_utts_per_part = int(num_utts / num_parts)
num_utts_rest = num_utts % num_parts
start_index = 0
shuffled_utt_ids = list(spk2utt[speaker_id])
random.shuffle(shuffled_utt_ids)
for i in range(num_parts):
num_utts_new = num_utts_per_part
if num_utts_rest > 0:
num_utts_new += 1
num_utts_rest -= 1
if i > 0:
new_speaker_id = naming.generate_name(15, not_in=self.speakers.keys())
new_speaker = self.add_speaker(new_speaker_id)
new_speaker.load_speaker_info_from_dict(self.speakers[speaker_id].get_speaker_info_dict())
new_speaker.part_from_speaker = speaker_id
part_utt_ids = shuffled_utt_ids[start_index:start_index + num_utts_new]
for utt_id in part_utt_ids:
if utt_id.startswith(speaker_id):
changed_utt_id = utt_id.replace(speaker_id, new_speaker_id)
else:
changed_utt_id = naming.generate_name(15, not_in=self.utterances.keys())
self.utterances[changed_utt_id] = self.utterances[utt_id]
self.utterances[changed_utt_id].idx = changed_utt_id
del self.utterances[utt_id]
self.segmentations[changed_utt_id] = self.segmentations[utt_id]
for key, seg in self.segmentations[changed_utt_id].items():
seg.utterance_idx = changed_utt_id
del self.segmentations[utt_id]
start_index += num_utts_new
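# Illustrative usage sketch (paths and ids below are hypothetical; only methods
# defined in this module are used):
#
#   ds = Dataset(path='/data/my_corpus')
#   spk = ds.add_speaker(gender='m')
#   wav = ds.add_file('/data/wav/rec_001.wav')
#   utt = ds.add_utterance(wav.idx, speaker_idx=spk.idx, start=0, end=-1)
#   ds.add_segmentation(utt.idx, segments='hello world')
#   ds.save()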
|
{
"content_hash": "65bde6ffaf4fb9074d8f79ed25ebe8a9",
"timestamp": "",
"source": "github",
"line_count": 728,
"max_line_length": 151,
"avg_line_length": 36.28434065934066,
"alnum_prop": 0.6085936021200076,
"repo_name": "ynop/spych",
"id": "b104e832a3929501f588d8bffee97ae779aca51e",
"size": "26415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spych/data/dataset/dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2743"
},
{
"name": "Python",
"bytes": "195881"
}
],
"symlink_target": ""
}
|
__author__ = 'Ahmed Hani Ibrahim'
import pandas as pnd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
def get_train_data():
training_data = pnd.read_csv("./train.csv", header=0, parse_dates=['Dates'])
#training_data = pnd.read_csv("./train.csv", header=0)
return training_data
def get_test_data():
testing_data = pnd.read_csv("./test.csv", header=0, parse_dates=['Dates'])
#testing_data = pnd.read_csv("./test.csv", header=0)
return testing_data
def vectorize_training_data(training_data):
training_data['Year'] = training_data['Dates'].map(lambda y: y.year)
training_data['Week'] = training_data['Dates'].map(lambda w: w.week)
training_data['Hour'] = training_data['Dates'].map(lambda h: h.hour)
categories = list(enumerate(sorted(np.unique(training_data['Category']))))
descripts = list(enumerate(sorted(np.unique(training_data['Descript']))))
day_of_weeks = list(enumerate(sorted(np.unique(training_data['DayOfWeek']))))
pd_districts = list(enumerate(sorted(np.unique(training_data['PdDistrict']))))
resolutions = list(enumerate(sorted(np.unique(training_data['Resolution']))))
#addresses = list(enumerate(sorted(np.unique(training_data['Address']))))
#set indices
categories_values = {name: i for i, name in categories}
descripts_values = {name: i for i, name in descripts}
day_of_weeks_values = {name: i for i, name in day_of_weeks}
pd_districts_values = {name: i for i, name in pd_districts}
resolutions_values = {name: i for i, name in resolutions}
#addresses_values = {name: i for i, name in addresses}
training_data['Category'] = training_data['Category'].map(lambda c: categories_values[c]).astype(int)
training_data['Descript'] = training_data['Descript'].map(lambda c: descripts_values[c]).astype(int)
training_data['DayOfWeek'] = training_data['DayOfWeek'].map(lambda c: day_of_weeks_values[c]).astype(int)
training_data['PdDistrict'] = training_data['PdDistrict'].map(lambda c: pd_districts_values[c]).astype(int)
training_data['Resolution'] = training_data['Resolution'].map(lambda c: resolutions_values[c]).astype(int)
training_data['X'] = training_data['X'].map(lambda x: "%.2f" % round(x, 2)).astype(float)
training_data['Y'] = training_data['Y'].map(lambda y: "%.2f" % round(y, 2)).astype(float)
return training_data
def vectorize_testing_data(testing_data):
testing_data['Year'] = testing_data['Dates'].map(lambda y: y.year)
testing_data['Week'] = testing_data['Dates'].map(lambda w: w.week)
testing_data['Hour'] = testing_data['Dates'].map(lambda h: h.hour)
day_of_weeks = list(enumerate(sorted(np.unique(testing_data['DayOfWeek']))))
pd_districts = list(enumerate(sorted(np.unique(testing_data['PdDistrict']))))
day_of_weeks_values = {name: i for i, name in day_of_weeks}
pd_districts_values = {name: i for i, name in pd_districts}
testing_data['DayOfWeek'] = testing_data['DayOfWeek'].map(lambda c: day_of_weeks_values[c]).astype(int)
testing_data['PdDistrict'] = testing_data['PdDistrict'].map(lambda c: pd_districts_values[c]).astype(int)
testing_data['X'] = testing_data['X'].map(lambda x: "%.2f" % round(x, 2)).astype(float)
testing_data['Y'] = testing_data['Y'].map(lambda y: "%.2f" % round(y, 2)).astype(float)
return testing_data
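# Illustrative usage sketch (assumes train.csv and test.csv from the Kaggle
# San Francisco Crime Classification competition sit in the working directory,
# as the loaders above expect):
#
#   train = vectorize_training_data(get_train_data())
#   test = vectorize_testing_data(get_test_data())
#   print(train[['Category', 'DayOfWeek', 'PdDistrict', 'Hour']].head())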
|
{
"content_hash": "cb129f9b060378cd0b77bcdbc3e551de",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 111,
"avg_line_length": 50.26865671641791,
"alnum_prop": 0.6802256532066508,
"repo_name": "AhmedHani/Kaggle-Machine-Learning-Competitions",
"id": "954238c96bcc1e26ca87cafe631130bc1f6b61cb",
"size": "3368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Easy/SanFranciscoCrimeClassification/get_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "139524"
},
{
"name": "Python",
"bytes": "46606"
}
],
"symlink_target": ""
}
|
from functools import partial
import os.path as op
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc)
has_brainstorm_data = partial(has_dataset, name='brainstorm')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None):
archive_name = dict(brainstorm='bst_phantom_ctf.tar.gz')
data_path = _data_path(path=path, force_update=force_update,
update_path=update_path, name='brainstorm',
download=download, archive_name=archive_name)
if data_path != '':
return op.join(data_path, 'bst_phantom_ctf')
else:
return data_path
_data_path_doc = _data_path_doc.format(name='brainstorm',
conf='MNE_DATASETS_BRAINSTORM_DATA'
'_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
'brainstorm (bst_phantom_ctf) dataset')
data_path.__doc__ = _data_path_doc
def get_version():
return _get_version('brainstorm')
get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
"""Get description of brainstorm (bst_phantom_ctf) dataset
"""
for desc in _description.splitlines():
print(desc)
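# Illustrative usage sketch (import path taken from this file's location under
# mne/datasets/brainstorm/; the download location is an assumption):
#
#   from mne.datasets.brainstorm import bst_phantom_ctf
#   data_dir = bst_phantom_ctf.data_path()  # downloads the archive on first call
#   print(bst_phantom_ctf.get_version())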
|
{
"content_hash": "9f67c4ab4168628b511813cf4a1d071d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 79,
"avg_line_length": 32.26086956521739,
"alnum_prop": 0.6030997304582211,
"repo_name": "jniediek/mne-python",
"id": "ab10b63ce1a3012e8509684339376771ebe108b1",
"size": "1562",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mne/datasets/brainstorm/bst_phantom_ctf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3679"
},
{
"name": "Python",
"bytes": "5509376"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from .version import __version__ # noqa
from .xnat_BIDS import * # noqa
|
{
"content_hash": "ce780bf3bd6a5f3da49160793a167a12",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 64,
"avg_line_length": 46.333333333333336,
"alnum_prop": 0.7266187050359713,
"repo_name": "HBClab/xnat_BIDS",
"id": "e3e46e1b542518669b547e946ae5ae2c522991ec",
"size": "139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xnat_BIDS/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "40949"
},
{
"name": "Makefile",
"bytes": "268"
},
{
"name": "Python",
"bytes": "25614"
}
],
"symlink_target": ""
}
|
from msrest.paging import Paged
class USqlCredentialPaged(Paged):
"""
A paging container for iterating over a list of USqlCredential objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[USqlCredential]'}
}
def __init__(self, *args, **kwargs):
super(USqlCredentialPaged, self).__init__(*args, **kwargs)
|
{
"content_hash": "f1362fffc0037d80931238d6a37e58f6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 26.5,
"alnum_prop": 0.5966981132075472,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "0d1e2a26e39af8f7aabd144b3a1dfab82f644091",
"size": "898",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
}
|
""" Test case for the GraphWidget"""
from tempfile import mkdtemp
from pySUMOQt import MainWindow
import pysumo
import shutil
"""
Steps:
1. Open pySUMO
2. Open TextEditor
3. Open Merge.kif
4. Choose Merge.kif
5. Collapse Line 288
6. Collapse Line 136
7. Collapse Line 134
8. Uncollapse Line 134
9. Uncollapse Line 288
10. Collapse all
11. Expand all
"""
if __name__ == "__main__":
tmpdir = mkdtemp()
pysumo.CONFIG_PATH = tmpdir
MainWindow.main()
shutil.rmtree(tmpdir, ignore_errors=True)
|
{
"content_hash": "6fddf032f7d9e344776cff8a86e31111",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 45,
"avg_line_length": 19.46153846153846,
"alnum_prop": 0.7154150197628458,
"repo_name": "pySUMO/pysumo",
"id": "2cf843dc69610804d8c14cf03713829988197bf6",
"size": "506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/gui/texteditor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "107"
},
{
"name": "Python",
"bytes": "1164116"
},
{
"name": "Shell",
"bytes": "221"
}
],
"symlink_target": ""
}
|
""" Sahana Eden Situation Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SituationModel",)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3SituationModel(S3Model):
"""
Situation Super Entity & Presence tables for Trackable resources
"""
names = ("sit_situation",
"sit_trackable",
"sit_presence",
"sit_location",
)
def model(self):
T = current.T
location_id = self.gis_location_id
configure = self.configure
super_entity = self.super_entity
# ---------------------------------------------------------------------
# Situation Super-Entity
#
situation_types = Storage(irs_incident = T("Incident"),
rms_req = T("Request"),
pr_presence = T("Presence"),
)
tablename = "sit_situation"
super_entity(tablename, "sit_id", situation_types,
Field("datetime", "datetime"),
location_id(),
)
configure(tablename,
deletable = False,
editable = False,
listadd = False,
)
# ---------------------------------------------------------------------
# Trackable Types
#
# Use:
# - add a field with super_link("track_id", "sit_trackable")
# - add as super-entity in configure (super_entity=s3db.sit_trackable)
#
trackable_types = Storage(asset_asset = T("Asset"),
dvi_body = T("Dead Body"),
event_resource = T("Event Resource"),
hrm_human_resource = T("Human Resource"),
pr_person = T("Person"),
)
tablename = "sit_trackable"
super_entity(tablename, "track_id",
trackable_types,
Field("track_timestmp", "datetime",
readable = False,
writable = False,
),
)
configure(tablename,
deletable = False,
editable = False,
listadd = False,
)
# Components
self.add_components(tablename,
# Presence
sit_presence=self.super_key("sit_trackable"),
)
# ---------------------------------------------------------------------
# Presence Records for trackables
#
# Use:
# - will be automatically available to all trackable types
#
tablename = "sit_presence"
self.define_table(tablename,
self.super_link("track_id", "sit_trackable"),
s3_datetime("timestmp",
label = T("Date/Time"),
),
location_id(
widget = S3LocationSelector(show_address=False,
show_postcode=False,
),
),
Field("direction",
label = T("Direction"),
),
Field("speed",
label = T("Speed"),
),
Field("accuracy",
label = T("Accuracy"),
),
Field("interlock",
readable = False,
writable = False,
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(sit_location = self.sit_location,
)
# ---------------------------------------------------------------------
@staticmethod
def sit_location(row, tablename):
"""
Virtual Field to return the current location of a Trackable
@ToDo: Bulk
@ToDo: Also show Timestamp of when seen there
"""
s3db = current.s3db
tracker = S3Tracker()(s3db[tablename], row[tablename].id)
location = tracker.get_location(as_rows=True).first()
return s3db.gis_location_represent(None, row=location)
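# Illustrative sketch of how another model would opt in to tracking, following
# the "Use:" notes above (the table and field names here are hypothetical):
#
#   tablename = "example_vehicle"
#   self.define_table(tablename,
#                     self.super_link("track_id", "sit_trackable"),
#                     Field("name"),
#                     *s3_meta_fields())
#   self.configure(tablename,
#                  super_entity = current.s3db.sit_trackable)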
# END =========================================================================
|
{
"content_hash": "e28c3f289f6503758ffdc3c5353c2ebc",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 37.576687116564415,
"alnum_prop": 0.43444897959183676,
"repo_name": "julianprabhakar/eden_car",
"id": "6bfeb003f6c33ec6871f90b64ae8c3e4ff2dbcee",
"size": "6150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/s3db/sit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2030949"
},
{
"name": "JavaScript",
"bytes": "19162817"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "666"
},
{
"name": "Python",
"bytes": "28361616"
},
{
"name": "Ruby",
"bytes": "2051"
},
{
"name": "Shell",
"bytes": "4846"
},
{
"name": "XSLT",
"bytes": "2644035"
}
],
"symlink_target": ""
}
|
import jinja2
import os
# Supports an incomplete spec of DNS zone entries
class Zone(object):
# TODO: Add Validation
# TODO: Compare with RFC
def __init__(self):
self.contents = {
'A': [],
'AAAA': [],
'CAA': [],
'CERT': [],
'CNAME': [],
'NAPTR': [],
'NS': [],
'PTR': [],
'SOA': [],
'SPF': [],
'SRV': [],
'TXT': []
}
def a(self, value=None):
if not value:
return self.contents['A']
else:
idx = self.find(self.contents['A'], 'alias', value['alias'])
if idx != -1:
self.contents['A'].pop(idx)
self.contents['A'].append(value)
return self.contents['A']
def aaaa(self, value=None):
if not value:
return self.contents['AAAA']
else:
self.contents['AAAA'].append(value)
return self.contents['AAAA']
def caa(self, value=None):
if not value:
return self.contents['CAA']
else:
self.contents['CAA'].append(value)
return self.contents['CAA']
def cert(self, value=None):
if not value:
return self.contents['CERT']
else:
self.contents['CERT'].append(value)
return self.contents['CERT']
def cname(self, value=None):
if not value:
return self.contents['CNAME']
else:
idx = self.find(self.contents['CNAME'], 'alias', value['alias'])
if idx != -1:
self.contents['CNAME'].pop(idx)
self.contents['CNAME'].append(value)
return self.contents['CNAME']
def ns(self, value=None):
if not value:
return self.contents['NS']
else:
idx = self.find(self.contents['NS'], 'alias', value['alias'])
if idx != -1:
self.contents['NS'].pop(idx)
self.contents['NS'].append(value)
return self.contents['NS']
def naptr(self, value=None):
if not value:
return self.contents['NAPTR']
else:
# idx = self.find(self.contents['NAPTR'], 'alias', value['alias'])
# if idx != -1:
# self.contents['NAPTR'].pop(idx)
self.contents['NAPTR'].append(value)
return self.contents['NAPTR']
def ptr(self, value=None):
if not value:
return self.contents['PTR']
else:
self.contents['PTR'].append(value)
return self.contents['PTR']
def soa(self, value=None):
if not value:
return self.contents['SOA']
else:
if len(self.contents['SOA']) > 0:
self.contents['SOA'].pop(-1)
self.contents['SOA'].append(value)
return self.contents['SOA']
def spf(self, value=None):
if not value:
return self.contents['SPF']
else:
self.contents['SPF'].append(value)
return self.contents['SPF']
def srv(self, value=None):
if not value:
return self.contents['SRV']
else:
self.contents['SRV'].append(value)
return self.contents['SRV']
def txt(self, value=None):
if not value:
return self.contents['TXT']
else:
self.contents['TXT'].append(value)
return self.contents['TXT']
# ############
# Template Methods
# ############
def to_file(self, filepath='/etc/bind/db.example.com'):
contents = self.read_template()
t = jinja2.Template(contents)
with open('%s' % filepath, 'w') as f:
f.write(t.render(data=self.contents))
def read_template(self):
with open('%s/contrib/bind/templates/zone.jinja2' %
os.environ['CHARM_DIR']) as f:
return f.read()
# #############
# Utility methods
# #############
def find(self, lst, key, value):
for i, dic in enumerate(lst):
if dic[key] == value:
return i
return -1
def remove(self, needle, haystack, value):
        if haystack not in self.contents:
raise IndexError("Unable to locate %s in storage" % haystack)
idx = self.find(self.contents[haystack], needle, value)
if idx == -1:
raise KeyError("Value not found in %s" % haystack)
self.contents[haystack].pop(idx)
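# Usage sketch (editor's illustration, not part of the charm): record dicts
# are free-form and rendered by the zone.jinja2 template; only the 'alias'
# key is actually required by the replace-on-duplicate logic in a(), cname()
# and ns(). The 'addr' and 'ttl' keys below are assumptions.
#
#   zone = Zone()
#   zone.a({'alias': 'www', 'addr': '192.0.2.10', 'ttl': 300})
#   zone.a({'alias': 'www', 'addr': '192.0.2.11', 'ttl': 300})  # replaces the first 'www' entry
#   zone.to_file('/etc/bind/db.example.com')  # renders the jinja2 template (needs CHARM_DIR set)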
|
{
"content_hash": "14c0d936fd63049489f273cb5650f6a2",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 78,
"avg_line_length": 29.490322580645163,
"alnum_prop": 0.49332749945307375,
"repo_name": "chuckbutler/DNS-Charm",
"id": "dc4f6e2bfe18f680cca94ea6ba873d4492a9649f",
"size": "4571",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/bind/zone.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "297"
},
{
"name": "Makefile",
"bytes": "271"
},
{
"name": "Python",
"bytes": "65196"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
}
|
from .hash_lookup import HashLookup
from .pg_trigram import PgTrigramCharField, PgTrigramTextField
from .range import (DateRangeField, DateTimeRangeField, FloatRangeField,
                    BigIntegerRangeField, IntegerRangeField)
from .pg_ltree import PgLtreeField
|
{
"content_hash": "791b1282a2c47b3fd9c1a5c84ac857b0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 73,
"avg_line_length": 50.4,
"alnum_prop": 0.8452380952380952,
"repo_name": "RedMadRobot/rmr_django",
"id": "7980cd10b362ce9bf1edd3c61d46e9fc537e374b",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rmr/models/fields/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65602"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_r5_crafted_space.iff"
result.attribute_template_id = 9
result.stfName("droid_name","r5_base")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "072cf37c59b9984b757b3b3607d71747",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 22.307692307692307,
"alnum_prop": 0.6862068965517242,
"repo_name": "obi-two/Rebelion",
"id": "1469dcc7586ae3173d43d36858d578e25b546d44",
"size": "435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_r5_crafted_space.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Provide the ReplyableMixin class."""
from ....const import API_PATH
class ReplyableMixin(object):
"""Interface for RedditBase classes that can be replied to."""
def reply(self, body):
"""Reply to the object.
:param body: The markdown formatted content for a comment.
:returns: A :class:`~.Comment` object for the newly created comment.
"""
data = {'text': body, 'thing_id': self.fullname}
return self._reddit.post(API_PATH['comment'], data=data)[0]
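# Usage sketch (editor's illustration): any PRAW model that mixes in
# ReplyableMixin (e.g. a Submission or Comment) gains reply(); the submission
# id below is a placeholder.
#
#   submission = reddit.submission(id='abc123')
#   comment = submission.reply('Thanks for sharing!')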
|
{
"content_hash": "28d7f795c022ce24cb642e68fb0457e1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 76,
"avg_line_length": 32.0625,
"alnum_prop": 0.6374269005847953,
"repo_name": "nmtake/praw",
"id": "2e103fc87e61a8f49e90d16aeb86383fb37690a5",
"size": "513",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "praw/models/reddit/mixins/replyable.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "367899"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from toolz import curry, pipe, partial
from .optimize import fuse, cull
import multiprocessing
import psutil
import dill
import pickle
from .async import get_async # TODO: get better get
def get(dsk, keys, optimizations=[fuse]):
""" Multiprocessed get function appropriate for Bags """
pool = multiprocessing.Pool(psutil.cpu_count())
manager = multiprocessing.Manager()
queue = manager.Queue()
apply_async = dill_apply_async(pool.apply_async)
# Optimize Dask
dsk2 = pipe(dsk, partial(cull, keys=keys), *optimizations)
try:
# Run
result = get_async(apply_async, psutil.cpu_count(), dsk2, keys,
queue=queue)
finally:
pool.close()
return result
def dill_apply_func(sfunc, sargs, skwds):
func = dill.loads(sfunc)
args = dill.loads(sargs)
kwds = dill.loads(skwds)
return func(*args, **kwds)
@curry
def dill_apply_async(apply_async, func, args=(), kwds={}):
sfunc = dill.dumps(func)
sargs = dill.dumps(args)
skwds = dill.dumps(kwds)
return apply_async(dill_apply_func, args=[sfunc, sargs, skwds])
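# Usage sketch (editor's illustration): running a tiny hand-built graph
# through the multiprocessing scheduler above from user code.
#
#   from dask.multiprocessing import get
#
#   def add(x, y):
#       return x + y
#
#   dsk = {'x': 1, 'y': 2, 'z': (add, 'x', 'y')}
#   get(dsk, ['z'])   # evaluates 'z' in a worker process; expected result: [3]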
|
{
"content_hash": "836dca1452e6bbb02d9245e5445492be",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 27.627906976744185,
"alnum_prop": 0.664983164983165,
"repo_name": "PeterDSteinberg/dask",
"id": "a9c1ec768a13c5a62c30bce147381654ea2a70c8",
"size": "1188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/multiprocessing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "200500"
}
],
"symlink_target": ""
}
|
import dilap.core.model as dmo
import dilap.core.tools as dpr
import dilap.core.vector as dpv
import pdb,math
class stairs(dmo.model):
def __init__(self,*args,**kwargs):
dmo.model.__init__(self,*args,**kwargs)
self._def('steps',8,**kwargs)
self._def('l',10,**kwargs)
self._def('w',4,**kwargs)
self._def('h',8,**kwargs)
self._geo()
def _profile(self,stepheight,steplength):
line = []
p = dpv.zero()
for sx in range(self.steps):
line.append(p.copy())
p.translate_z(stepheight)
line.append(p.copy())
p.translate_y(steplength)
line.append(p.copy())
return line
def _geo_from_profile(self,line,l,w,h,steps,stepheight,steplength):
topleft = [pt.copy().translate_x(-w/2.0) for pt in line]
topright = [pt.copy().translate_x(w/2.0) for pt in line]
bottom = dpr.point_line(dpv.zero(),dpv.vector(0,l,h),steps)
for bdx in range(steps):
bottom.insert(2*bdx+1,bottom[2*bdx+1].copy())
dpv.translate_coords_z(bottom[1:],-stepheight)
bottomleft = [pt.copy().translate_x(-w/2.0) for pt in bottom]
bottomright = [pt.copy().translate_x(w/2.0) for pt in bottom]
nfs = []
nfs.extend(self._bridge(topleft,topright))
nfs.extend(self._bridge(bottomleft,topleft))
nfs.extend(self._bridge(topright,bottomright))
nfs.extend(self._bridge(bottomright,bottomleft))
nfs.extend(self._quad(
topleft[-1],topright[-1],
bottomright[-1],bottomleft[-1]))
#self._project_uv_flat(nfs)
def _geo(self):
l,w,h = float(self.l),float(self.w),float(self.h)
steps = self.steps
stepheight = h/steps
steplength = l/steps
line = self._profile(stepheight,steplength)
self._geo_from_profile(line,l,w,h,steps,stepheight,steplength)
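# Usage sketch (editor's illustration): the keyword names mirror the _def()
# defaults above; whether additional kwargs are accepted depends on dmo.model.
#
#   s = stairs(steps=12, l=12.0, w=3.0, h=9.0)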
|
{
"content_hash": "ec41f477ecf5bab2238d0c46bbbc7434",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 35.236363636363635,
"alnum_prop": 0.5851393188854489,
"repo_name": "ctogle/dilapidator",
"id": "a4a26aa0ffe78526babdc2caa3ae39b3bc8485b5",
"size": "1938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dilap/BROKEN/primitive/stairs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1115979"
}
],
"symlink_target": ""
}
|
'''
Simple WebSocket broadcast server built on pwebsocket_server.WebsocketServer,
with helpers to run it in the foreground or in a daemon thread.
'''
import json
import threading
USE_SSL = False
PORT=8000
SERVER = None
from pwebsocket_server import WebsocketServer
class WSServer(WebsocketServer):
def sendMessageToAllClients(self, msg):
msgBuf = json.dumps(msg)
self.send_message_to_all(msgBuf)
def serveforever(self):
self.run_forever()
# Called for every client connecting (after handshake)
def new_client(client, server):
print("New client connected and was given id %d" % client['id'])
server.send_message_to_all("Hey all, a new client has joined us")
# Called for every client disconnecting
def client_left(client, server):
print("Client(%d) disconnected" % client['id'])
# Called when a client sends a message
def message_received(client, server, message):
if len(message) > 200:
message = message[:200]+'..'
print("Client(%d) said: %s" % (client['id'], message))
server.send_message_to_all(message)
#def send_msg_to_all(msg):
# msgBuf = json.dumps(msg)
# server.send_message_to_all(msgBuf)
def getServer():
# server = WebsocketServer(PORT)
server = WSServer(PORT)
server.set_fn_new_client(new_client)
server.set_fn_client_left(client_left)
server.set_fn_message_received(message_received)
return server
def runServer(server=None):
    if server is None:
server = getServer()
server.run_forever()
def runInThread():
print "*** runServer started in thread"
server = getServer()
thread = threading.Thread(target=runServer,args=(server,))
    thread.daemon = True
thread.start()
return server
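# Usage sketch (editor's illustration): start the broadcast server in a
# daemon thread and push a JSON-serializable message to every connected client.
#
#   server = runInThread()
#   server.sendMessageToAllClients({'type': 'greeting', 'msg': 'hello'})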
|
{
"content_hash": "c0267d61a6de3613269a4aa04d9816c2",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 66,
"avg_line_length": 24.29230769230769,
"alnum_prop": 0.6871437618746041,
"repo_name": "WorldViews/Spirals",
"id": "f288b6154c45b8ce3ad55c11df09887f8149a732",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "KinPy/PWSServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "3370"
},
{
"name": "Batchfile",
"bytes": "545"
},
{
"name": "CSS",
"bytes": "3143"
},
{
"name": "HTML",
"bytes": "2446990"
},
{
"name": "JavaScript",
"bytes": "2965457"
},
{
"name": "Python",
"bytes": "1074776"
},
{
"name": "Ruby",
"bytes": "308"
},
{
"name": "Shell",
"bytes": "431"
}
],
"symlink_target": ""
}
|
import addressbook_pb2
import sys
# Iterates though all people in the AddressBook and prints info about them.
def ListPeople(address_book):
for person in address_book.person:
print "Person ID:", person.id
print " Name:", person.name
if person.HasField('email'):
print " E-mail address:", person.email
for phone_number in person.phone:
if phone_number.type == addressbook_pb2.Person.MOBILE:
print " Mobile phone #:",
elif phone_number.type == addressbook_pb2.Person.HOME:
print " Home phone #:",
elif phone_number.type == addressbook_pb2.Person.WORK:
print " Work phone #:",
print phone_number.number
# Main procedure: Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
sys.exit(-1)
address_book = addressbook_pb2.AddressBook()
# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()
ListPeople(address_book)
|
{
"content_hash": "1236f183f1b5e84e459d18533ddfb41a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 31.08823529411765,
"alnum_prop": 0.6821192052980133,
"repo_name": "google/shipshape",
"id": "d470349a8bf34559391d682cdaf300c66b1aa118",
"size": "1139",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "third_party/proto/examples/list_people.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "963"
},
{
"name": "Dockerfile",
"bytes": "13077"
},
{
"name": "Go",
"bytes": "219880"
},
{
"name": "HTML",
"bytes": "6243"
},
{
"name": "Java",
"bytes": "145447"
},
{
"name": "JavaScript",
"bytes": "15005"
},
{
"name": "Python",
"bytes": "63655"
},
{
"name": "Shell",
"bytes": "30395"
}
],
"symlink_target": ""
}
|
"""The tests for the person component."""
from unittest.mock import Mock
import pytest
from homeassistant.components.device_tracker import (
ATTR_SOURCE_TYPE, SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER)
from homeassistant.components.person import (
ATTR_SOURCE, ATTR_USER_ID, DOMAIN, PersonManager)
from homeassistant.const import (
ATTR_GPS_ACCURACY, ATTR_ID, ATTR_LATITUDE, ATTR_LONGITUDE,
EVENT_HOMEASSISTANT_START, STATE_UNKNOWN)
from homeassistant.core import CoreState, State
from homeassistant.setup import async_setup_component
from tests.common import (
assert_setup_component, mock_component, mock_coro_func, mock_restore_cache)
DEVICE_TRACKER = 'device_tracker.test_tracker'
DEVICE_TRACKER_2 = 'device_tracker.test_tracker_2'
# pylint: disable=redefined-outer-name
@pytest.fixture
def storage_setup(hass, hass_storage, hass_admin_user):
"""Storage setup."""
hass_storage[DOMAIN] = {
'key': DOMAIN,
'version': 1,
'data': {
'persons': [
{
'id': '1234',
'name': 'tracked person',
'user_id': hass_admin_user.id,
'device_trackers': [DEVICE_TRACKER]
}
]
}
}
assert hass.loop.run_until_complete(
async_setup_component(hass, DOMAIN, {})
)
async def test_minimal_setup(hass):
"""Test minimal config with only name."""
config = {DOMAIN: {'id': '1234', 'name': 'test person'}}
with assert_setup_component(1):
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get('person.test_person')
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) is None
assert state.attributes.get(ATTR_USER_ID) is None
async def test_setup_no_id(hass):
"""Test config with no id."""
config = {DOMAIN: {'name': 'test user'}}
assert not await async_setup_component(hass, DOMAIN, config)
async def test_setup_no_name(hass):
"""Test config with no name."""
config = {DOMAIN: {'id': '1234'}}
assert not await async_setup_component(hass, DOMAIN, config)
async def test_setup_user_id(hass, hass_admin_user):
"""Test config with user id."""
user_id = hass_admin_user.id
config = {
DOMAIN: {'id': '1234', 'name': 'test person', 'user_id': user_id}}
with assert_setup_component(1):
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get('person.test_person')
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) is None
assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_valid_invalid_user_ids(hass, hass_admin_user):
"""Test a person with valid user id and a person with invalid user id ."""
user_id = hass_admin_user.id
config = {DOMAIN: [
{'id': '1234', 'name': 'test valid user', 'user_id': user_id},
{'id': '5678', 'name': 'test bad user', 'user_id': 'bad_user_id'}]}
with assert_setup_component(2):
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get('person.test_valid_user')
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) is None
assert state.attributes.get(ATTR_USER_ID) == user_id
state = hass.states.get('person.test_bad_user')
assert state is None
async def test_setup_tracker(hass, hass_admin_user):
"""Test set up person with one device tracker."""
hass.state = CoreState.not_running
user_id = hass_admin_user.id
config = {DOMAIN: {
'id': '1234', 'name': 'tracked person', 'user_id': user_id,
'device_trackers': DEVICE_TRACKER}}
with assert_setup_component(1):
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get('person.tracked_person')
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) is None
assert state.attributes.get(ATTR_USER_ID) == user_id
hass.states.async_set(DEVICE_TRACKER, 'home')
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == STATE_UNKNOWN
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'home'
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
assert state.attributes.get(ATTR_USER_ID) == user_id
hass.states.async_set(
DEVICE_TRACKER, 'not_home', {
ATTR_LATITUDE: 10.123456,
ATTR_LONGITUDE: 11.123456,
ATTR_GPS_ACCURACY: 10})
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'not_home'
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) == 10.123456
assert state.attributes.get(ATTR_LONGITUDE) == 11.123456
assert state.attributes.get(ATTR_GPS_ACCURACY) == 10
assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_setup_two_trackers(hass, hass_admin_user):
"""Test set up person with two device trackers."""
hass.state = CoreState.not_running
user_id = hass_admin_user.id
config = {DOMAIN: {
'id': '1234', 'name': 'tracked person', 'user_id': user_id,
'device_trackers': [DEVICE_TRACKER, DEVICE_TRACKER_2]}}
with assert_setup_component(1):
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get('person.tracked_person')
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) is None
assert state.attributes.get(ATTR_USER_ID) == user_id
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
hass.states.async_set(
DEVICE_TRACKER, 'home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'home'
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_GPS_ACCURACY) is None
assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
assert state.attributes.get(ATTR_USER_ID) == user_id
hass.states.async_set(
DEVICE_TRACKER_2, 'not_home', {
ATTR_LATITUDE: 12.123456,
ATTR_LONGITUDE: 13.123456,
ATTR_GPS_ACCURACY: 12,
ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
await hass.async_block_till_done()
hass.states.async_set(
DEVICE_TRACKER, 'not_home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'not_home'
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) == 12.123456
assert state.attributes.get(ATTR_LONGITUDE) == 13.123456
assert state.attributes.get(ATTR_GPS_ACCURACY) == 12
assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER_2
assert state.attributes.get(ATTR_USER_ID) == user_id
hass.states.async_set(
DEVICE_TRACKER_2, 'zone1', {ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'zone1'
assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER_2
hass.states.async_set(
DEVICE_TRACKER, 'home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
await hass.async_block_till_done()
hass.states.async_set(
DEVICE_TRACKER_2, 'zone2', {ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'home'
assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
async def test_ignore_unavailable_states(hass, hass_admin_user):
"""Test set up person with two device trackers, one unavailable."""
hass.state = CoreState.not_running
user_id = hass_admin_user.id
config = {DOMAIN: {
'id': '1234', 'name': 'tracked person', 'user_id': user_id,
'device_trackers': [DEVICE_TRACKER, DEVICE_TRACKER_2]}}
with assert_setup_component(1):
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get('person.tracked_person')
assert state.state == STATE_UNKNOWN
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
hass.states.async_set(DEVICE_TRACKER, 'home')
await hass.async_block_till_done()
hass.states.async_set(DEVICE_TRACKER, 'unavailable')
await hass.async_block_till_done()
# Unknown, as only 1 device tracker has a state, but we ignore that one
state = hass.states.get('person.tracked_person')
assert state.state == STATE_UNKNOWN
hass.states.async_set(DEVICE_TRACKER_2, 'not_home')
await hass.async_block_till_done()
# Take state of tracker 2
state = hass.states.get('person.tracked_person')
assert state.state == 'not_home'
# state 1 is newer but ignored, keep tracker 2 state
hass.states.async_set(DEVICE_TRACKER, 'unknown')
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'not_home'
async def test_restore_home_state(hass, hass_admin_user):
"""Test that the state is restored for a person on startup."""
user_id = hass_admin_user.id
attrs = {
ATTR_ID: '1234', ATTR_LATITUDE: 10.12346, ATTR_LONGITUDE: 11.12346,
ATTR_SOURCE: DEVICE_TRACKER, ATTR_USER_ID: user_id}
state = State('person.tracked_person', 'home', attrs)
mock_restore_cache(hass, (state, ))
hass.state = CoreState.not_running
mock_component(hass, 'recorder')
config = {DOMAIN: {
'id': '1234', 'name': 'tracked person', 'user_id': user_id,
'device_trackers': DEVICE_TRACKER}}
with assert_setup_component(1):
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get('person.tracked_person')
assert state.state == 'home'
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) == 10.12346
assert state.attributes.get(ATTR_LONGITUDE) == 11.12346
# When restoring state the entity_id of the person will be used as source.
assert state.attributes.get(ATTR_SOURCE) == 'person.tracked_person'
assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_duplicate_ids(hass, hass_admin_user):
"""Test we don't allow duplicate IDs."""
config = {DOMAIN: [
{'id': '1234', 'name': 'test user 1'},
{'id': '1234', 'name': 'test user 2'}]}
with assert_setup_component(2):
assert await async_setup_component(hass, DOMAIN, config)
assert len(hass.states.async_entity_ids('person')) == 1
assert hass.states.get('person.test_user_1') is not None
assert hass.states.get('person.test_user_2') is None
async def test_create_person_during_run(hass):
"""Test that person is updated if created while hass is running."""
config = {DOMAIN: {}}
with assert_setup_component(0):
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set(DEVICE_TRACKER, 'home')
await hass.async_block_till_done()
await hass.components.person.async_create_person(
'tracked person', device_trackers=[DEVICE_TRACKER])
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'home'
async def test_load_person_storage(hass, hass_admin_user, storage_setup):
"""Test set up person from storage."""
state = hass.states.get('person.tracked_person')
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) is None
assert state.attributes.get(ATTR_USER_ID) == hass_admin_user.id
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
hass.states.async_set(DEVICE_TRACKER, 'home')
await hass.async_block_till_done()
state = hass.states.get('person.tracked_person')
assert state.state == 'home'
assert state.attributes.get(ATTR_ID) == '1234'
assert state.attributes.get(ATTR_LATITUDE) is None
assert state.attributes.get(ATTR_LONGITUDE) is None
assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
assert state.attributes.get(ATTR_USER_ID) == hass_admin_user.id
async def test_load_person_storage_two_nonlinked(hass, hass_storage):
"""Test loading two users with both not having a user linked."""
hass_storage[DOMAIN] = {
'key': DOMAIN,
'version': 1,
'data': {
'persons': [
{
'id': '1234',
'name': 'tracked person 1',
'user_id': None,
'device_trackers': []
},
{
'id': '5678',
'name': 'tracked person 2',
'user_id': None,
'device_trackers': []
},
]
}
}
await async_setup_component(hass, DOMAIN, {})
assert len(hass.states.async_entity_ids('person')) == 2
assert hass.states.get('person.tracked_person_1') is not None
assert hass.states.get('person.tracked_person_2') is not None
async def test_ws_list(hass, hass_ws_client, storage_setup):
"""Test listing via WS."""
manager = hass.data[DOMAIN]
client = await hass_ws_client(hass)
resp = await client.send_json({
'id': 6,
'type': 'person/list',
})
resp = await client.receive_json()
assert resp['success']
assert resp['result']['storage'] == manager.storage_persons
assert len(resp['result']['storage']) == 1
assert len(resp['result']['config']) == 0
async def test_ws_create(hass, hass_ws_client, storage_setup,
hass_read_only_user):
"""Test creating via WS."""
manager = hass.data[DOMAIN]
client = await hass_ws_client(hass)
resp = await client.send_json({
'id': 6,
'type': 'person/create',
'name': 'Hello',
'device_trackers': [DEVICE_TRACKER],
'user_id': hass_read_only_user.id,
})
resp = await client.receive_json()
persons = manager.storage_persons
assert len(persons) == 2
assert resp['success']
assert resp['result'] == persons[1]
async def test_ws_create_requires_admin(hass, hass_ws_client, storage_setup,
hass_admin_user, hass_read_only_user):
"""Test creating via WS requires admin."""
hass_admin_user.groups = []
manager = hass.data[DOMAIN]
client = await hass_ws_client(hass)
resp = await client.send_json({
'id': 6,
'type': 'person/create',
'name': 'Hello',
'device_trackers': [DEVICE_TRACKER],
'user_id': hass_read_only_user.id,
})
resp = await client.receive_json()
persons = manager.storage_persons
assert len(persons) == 1
assert not resp['success']
async def test_ws_update(hass, hass_ws_client, storage_setup):
"""Test updating via WS."""
manager = hass.data[DOMAIN]
client = await hass_ws_client(hass)
persons = manager.storage_persons
resp = await client.send_json({
'id': 6,
'type': 'person/update',
'person_id': persons[0]['id'],
'name': 'Updated Name',
'device_trackers': [DEVICE_TRACKER_2],
'user_id': None,
})
resp = await client.receive_json()
persons = manager.storage_persons
assert len(persons) == 1
assert resp['success']
assert resp['result'] == persons[0]
    assert persons[0]['name'] == 'Updated Name'
assert persons[0]['device_trackers'] == [DEVICE_TRACKER_2]
assert persons[0]['user_id'] is None
state = hass.states.get('person.tracked_person')
assert state.name == 'Updated Name'
async def test_ws_update_require_admin(hass, hass_ws_client, storage_setup,
hass_admin_user):
"""Test updating via WS requires admin."""
hass_admin_user.groups = []
manager = hass.data[DOMAIN]
client = await hass_ws_client(hass)
original = dict(manager.storage_persons[0])
resp = await client.send_json({
'id': 6,
'type': 'person/update',
'person_id': original['id'],
'name': 'Updated Name',
'device_trackers': [DEVICE_TRACKER_2],
'user_id': None,
})
resp = await client.receive_json()
assert not resp['success']
not_updated = dict(manager.storage_persons[0])
assert original == not_updated
async def test_ws_delete(hass, hass_ws_client, storage_setup):
"""Test deleting via WS."""
manager = hass.data[DOMAIN]
client = await hass_ws_client(hass)
persons = manager.storage_persons
resp = await client.send_json({
'id': 6,
'type': 'person/delete',
'person_id': persons[0]['id'],
})
resp = await client.receive_json()
persons = manager.storage_persons
assert len(persons) == 0
assert resp['success']
assert len(hass.states.async_entity_ids('person')) == 0
ent_reg = await hass.helpers.entity_registry.async_get_registry()
assert not ent_reg.async_is_registered('person.tracked_person')
async def test_ws_delete_require_admin(hass, hass_ws_client, storage_setup,
hass_admin_user):
"""Test deleting via WS requires admin."""
hass_admin_user.groups = []
manager = hass.data[DOMAIN]
client = await hass_ws_client(hass)
resp = await client.send_json({
'id': 6,
'type': 'person/delete',
'person_id': manager.storage_persons[0]['id'],
'name': 'Updated Name',
'device_trackers': [DEVICE_TRACKER_2],
'user_id': None,
})
resp = await client.receive_json()
assert not resp['success']
persons = manager.storage_persons
assert len(persons) == 1
async def test_create_invalid_user_id(hass):
"""Test we do not allow invalid user ID during creation."""
manager = PersonManager(hass, Mock(), [])
await manager.async_initialize()
with pytest.raises(ValueError):
await manager.async_create_person(
name='Hello',
user_id='non-existing'
)
async def test_create_duplicate_user_id(hass, hass_admin_user):
"""Test we do not allow duplicate user ID during creation."""
manager = PersonManager(
hass, Mock(async_add_entities=mock_coro_func()), []
)
await manager.async_initialize()
await manager.async_create_person(
name='Hello',
user_id=hass_admin_user.id
)
with pytest.raises(ValueError):
await manager.async_create_person(
name='Hello',
user_id=hass_admin_user.id
)
async def test_update_double_user_id(hass, hass_admin_user):
"""Test we do not allow double user ID during update."""
manager = PersonManager(
hass, Mock(async_add_entities=mock_coro_func()), []
)
await manager.async_initialize()
await manager.async_create_person(
name='Hello',
user_id=hass_admin_user.id
)
person = await manager.async_create_person(
name='Hello',
)
with pytest.raises(ValueError):
await manager.async_update_person(
person_id=person['id'],
user_id=hass_admin_user.id
)
async def test_update_invalid_user_id(hass):
"""Test updating to invalid user ID."""
manager = PersonManager(
hass, Mock(async_add_entities=mock_coro_func()), []
)
await manager.async_initialize()
person = await manager.async_create_person(
name='Hello',
)
with pytest.raises(ValueError):
await manager.async_update_person(
person_id=person['id'],
user_id='non-existing'
)
async def test_update_person_when_user_removed(hass, hass_read_only_user):
"""Update person when user is removed."""
manager = PersonManager(
hass, Mock(async_add_entities=mock_coro_func()), []
)
await manager.async_initialize()
person = await manager.async_create_person(
name='Hello',
user_id=hass_read_only_user.id
)
await hass.auth.async_remove_user(hass_read_only_user)
await hass.async_block_till_done()
assert person['user_id'] is None
|
{
"content_hash": "00c6fe8100211a885f0fff7eb4b42587",
"timestamp": "",
"source": "github",
"line_count": 625,
"max_line_length": 79,
"avg_line_length": 34.9312,
"alnum_prop": 0.6418559912055698,
"repo_name": "DavidLP/home-assistant",
"id": "cde7633b1a3890e1c29c63e012998984860ff47e",
"size": "21832",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "tests/components/person/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15309293"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
from random import randint
import re
import hearthbreaker
import hearthbreaker.constants
import hearthbreaker.game_objects
import hearthbreaker.cards
import hearthbreaker.game_objects
import hearthbreaker.proxies
class ReplayException(Exception):
def __init__(self, message):
super().__init__(message)
class ReplayAction:
def play(self, game):
pass
class SpellAction(ReplayAction):
def __init__(self, card, target=None, game=None):
self.card = card
if target is not None:
self.target = hearthbreaker.proxies.ProxyCharacter(target)
else:
self.target = None
def play(self, game):
if self.target is not None:
game.current_player.agent.next_target = self.target.resolve(game)
game.play_card(self.card.resolve(game))
game.current_player.agent.next_target = None
def to_output_string(self):
if self.target is not None:
return 'play({0},{1})'.format(self.card.to_output(), self.target.to_output())
return 'play({0})'.format(self.card.to_output())
class MinionAction(ReplayAction):
def __init__(self, card, index, target=None, game=None):
self.card = card
self.index = index
if target is not None:
self.target = hearthbreaker.proxies.ProxyCharacter(target)
else:
self.target = None
def to_output_string(self):
if self.target is not None:
return 'summon({0},{1},{2})'.format(self.card.to_output(), self.index, self.target.to_output())
return 'summon({0},{1})'.format(self.card.to_output(), self.index)
def play(self, game):
if self.target is not None:
game.current_player.agent.next_target = self.target.resolve(game)
game.current_player.agent.next_index = self.index
game.play_card(self.card.resolve(game))
        game.current_player.agent.next_index = -1
class AttackAction(ReplayAction):
def __init__(self, character, target, game=None):
self.character = hearthbreaker.proxies.ProxyCharacter(character)
self.target = hearthbreaker.proxies.ProxyCharacter(target)
def to_output_string(self):
return 'attack({0},{1})'.format(self.character.to_output(), self.target.to_output())
def play(self, game):
game.current_player.agent.next_target = self.target.resolve(game)
self.character.resolve(game).attack()
game.current_player.agent.next_target = None
class PowerAction(ReplayAction):
def __init__(self, target=None, game=None):
self.target = target
if target is not None:
self.target = hearthbreaker.proxies.ProxyCharacter(target)
else:
self.target = None
self.game = game
def to_output_string(self):
if self.target is not None:
return 'power({0})'.format(self.target.to_output())
else:
return 'power()'
def play(self, game):
if self.target is not None:
game.current_player.agent.next_target = self.target.resolve(game)
game.current_player.hero.power.use()
game.current_player.agent.next_target = None
class TurnEndAction(ReplayAction):
def __init__(self, game=None):
pass
def to_output_string(self):
return 'end()'
def play(self, game):
pass
class ConcedeAction(ReplayAction):
def __init__(self):
pass
def to_output_string(self):
return "concede()"
def play(self, game):
game.current_player.hero.die(None)
game.current_player.hero.activate_delayed()
class Replay:
def __init__(self):
self.actions = []
self.random_numbers = []
self.last_card = None
self.card_class = None
self.last_target = None
self.last_index = None
self.game = None
self.decks = []
self.keeps = []
def save_decks(self, deck1, deck2):
self.decks = [deck1, deck2]
def record_random(self, result):
self.random_numbers.append(result)
def record_turn_end(self):
self._save_played_card()
self.actions.append(TurnEndAction())
def _save_played_card(self):
if self.last_card is not None:
if issubclass(self.card_class, hearthbreaker.game_objects.MinionCard):
if self.last_card.targetable:
self.actions.append(MinionAction(self.last_card, self.last_index, self.last_target, self.game))
self.last_card = None
self.last_index = None
self.last_target = None
else:
self.actions.append(MinionAction(self.last_card, self.last_index, game=self.game))
self.last_card = None
self.last_index = None
else:
if self.last_card.targetable:
self.actions.append(SpellAction(self.last_card, self.last_target, self.game))
self.last_card = None
self.last_target = None
else:
self.actions.append(SpellAction(self.last_card, game=self.game))
self.last_card = None
def record_card_played(self, card, index):
self._save_played_card()
self.last_card = hearthbreaker.proxies.ProxyCard(index, self.game)
self.last_card.targetable = card.targetable
self.card_class = type(card)
def record_option_chosen(self, option):
self.last_card.set_option(option)
def record_attack(self, attacker, target):
self._save_played_card()
self.actions.append(AttackAction(attacker, target, target.player.game))
def record_power(self):
self._save_played_card()
self.actions.append(PowerAction(game=self.game))
def record_power_target(self, target):
self.actions[len(self.actions) - 1].target = hearthbreaker.proxies.ProxyCharacter(target)
def record_kept_index(self, cards, card_index):
k_arr = []
for index in range(0, len(cards)):
if card_index[index]:
k_arr.append(index)
self.keeps.append(k_arr)
def write_replay(self, file):
# Mostly for testing, this function will check if the deck is made up of a repeating pattern
# and if so, shorten the output, since the parser will generate the pattern from a shorter sample
def shorten_deck(cards):
for pattern_length in range(1, 15):
matched = True
for index in range(pattern_length, 30):
if not isinstance(cards[index % pattern_length], type(cards[index])):
matched = False
break
if matched:
return cards[0:pattern_length]
if 'write' not in dir(file):
writer = open(file, 'w')
else:
writer = file
for deck in self.decks:
writer.write("deck(")
writer.write(hearthbreaker.constants.CHARACTER_CLASS.to_str(deck.character_class))
writer.write(",")
writer.write(",".join([card.name for card in shorten_deck(deck.cards)]))
writer.write(")\n")
if self.random_numbers.count(0) == len(self.random_numbers):
writer.write("random()\n")
else:
writer.write("random(")
writer.write(",".join([str(num) for num in self.random_numbers]))
writer.write(")\n")
for keep in self.keeps:
writer.write("keep(")
writer.write(",".join([str(k) for k in keep]))
writer.write(")\n")
for action in self.actions:
writer.write(action.to_output_string() + "\n")
def parse_replay(self, replayfile):
if 'read' not in dir(replayfile):
replayfile = open(replayfile, 'r')
        line_pattern = re.compile(r"\s*(\w*)\s*\(([^)]*)\)\s*(;.*)?$")
for line in replayfile:
(action, args) = line_pattern.match(line).group(1, 2)
args = [arg.strip() for arg in args.split(",")]
if action == 'play':
card = args[0]
if len(args) > 1:
target = args[1]
else:
target = None
self.actions.append(SpellAction(hearthbreaker.proxies.ProxyCard(card), target))
elif action == 'summon':
card = args[0]
index = int(args[1])
if len(args) > 2:
target = args[2]
else:
target = None
self.actions.append(MinionAction(hearthbreaker.proxies.ProxyCard(card), index, target))
elif action == 'attack':
self.actions.append(AttackAction(args[0], args[1]))
elif action == 'power':
if len(args) > 0:
self.actions.append(PowerAction(args[0]))
elif action == 'end':
self.actions.append(TurnEndAction())
elif action == 'random':
if len(self.random_numbers) > 0:
raise ReplayException("Only one random number list per file")
if len(args[0]) > 0:
self.random_numbers = [int(num) for num in args]
else:
self.random_numbers = []
elif action == 'deck':
if len(self.decks) > 1:
raise ReplayException("Maximum of two decks per file")
deck_size = len(args) - 1
cards = [hearthbreaker.game_objects.card_lookup(args[1 + index % deck_size]) for index in range(0, 30)]
self.decks.append(
hearthbreaker.game_objects.Deck(cards, hearthbreaker.constants.CHARACTER_CLASS.from_str(args[0])))
elif action == 'keep':
if len(self.keeps) > 1:
raise ReplayException("Maximum of two keep directives per file")
self.keeps.append(args)
elif action == 'concede':
self.actions.append(ConcedeAction())
replayfile.close()
        if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
class RecordingGame(hearthbreaker.game_objects.Game):
def __init__(self, decks, agents):
game = self
class RecordingAgent:
__slots__ = ['agent']
def __init__(self, proxied_agent):
object.__setattr__(self, "agent", proxied_agent)
def choose_index(self, card, player):
index = self.agent.choose_index(card, player)
game.replay.last_index = index
return index
def choose_target(self, targets):
target = self.agent.choose_target(targets)
game.replay.last_target = target
return target
def choose_option(self, *options):
option = self.agent.choose_option(options)
game.replay.record_option_chosen(options.index(option))
return option
def __getattr__(self, item):
return self.agent.__getattribute__(item)
def __setattr__(self, key, value):
setattr(self.__getattribute__("agent"), key, value)
self.replay = hearthbreaker.replay.Replay()
self.replay.game = self
agents = [RecordingAgent(agents[0]), RecordingAgent(agents[1])]
super().__init__(decks, agents, self._find_random)
self.replay.save_decks(*decks)
self.bind("kept_cards", self.replay.record_kept_index)
for player in self.players:
player.bind("turn_ended", self.replay.record_turn_end,)
player.bind("used_power", self.replay.record_power)
player.hero.bind("found_power_target", self.replay.record_power_target)
player.bind("card_played", self.replay.record_card_played)
player.bind("attack", self.replay.record_attack)
def _find_random(self, lower_bound, upper_bound):
result = randint(lower_bound, upper_bound)
self.replay.record_random(result)
return result
class SavedGame(hearthbreaker.game_objects.Game):
def __init__(self, replay_file):
replay = Replay()
replay.parse_replay(replay_file)
action_index = 0
random_index = 0
game_ref = self
k_index = 0
def replay_random(start, end):
nonlocal random_index
random_index += 1
return replay.random_numbers[random_index - 1]
def null_random(start, end):
return 0
class ReplayAgent:
def __init__(self):
self.next_target = None
self.next_index = -1
self.next_option = None
def do_card_check(self, cards):
nonlocal k_index
keep_arr = [False] * len(cards)
for index in replay.keeps[k_index]:
keep_arr[int(index)] = True
k_index += 1
return keep_arr
def do_turn(self, player):
nonlocal action_index
while action_index < len(replay.actions) and not player.hero.dead and type(
replay.actions[action_index]) is not hearthbreaker.replay.TurnEndAction:
replay.actions[action_index].play(game_ref)
action_index += 1
action_index += 1
def set_game(self, game):
pass
def choose_target(self, targets):
return self.next_target
def choose_index(self, card, player):
return self.next_index
def choose_option(self, *options):
return options[self.next_option]
        if len(replay.random_numbers) == 0:
random_func = null_random
else:
random_func = replay_random
super().__init__(replay.decks, [ReplayAgent(), ReplayAgent()], random_func)
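# Usage sketch (editor's illustration): record a game, write the replay and
# play it back. deck1/deck2 and agent1/agent2 stand in for real hearthbreaker
# Deck objects and agents; start() is assumed to be the usual Game entry point.
#
#   game = RecordingGame([deck1, deck2], [agent1, agent2])
#   game.start()
#   game.replay.write_replay("game.hsreplay")
#
#   replayed = SavedGame("game.hsreplay")
#   replayed.start()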
|
{
"content_hash": "1617c59b362b8046577f25350f8098dc",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 119,
"avg_line_length": 34.571776155717764,
"alnum_prop": 0.5623196565557042,
"repo_name": "anuragpapineni/Hearthbreaker-evolved-agent",
"id": "7f9500b2f0a4831f5cb6b9cfe7a5ccf5869608b0",
"size": "14209",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hearthbreaker/replay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "35387"
},
{
"name": "Makefile",
"bytes": "6762"
},
{
"name": "Python",
"bytes": "1230806"
},
{
"name": "Shell",
"bytes": "6459"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
import views
urlpatterns = patterns('',
url(r'^(\d+)', views.generate_thumbnails),
)
|
{
"content_hash": "92de92d9f53a5f717c31ce5d07044982",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 51,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.704225352112676,
"repo_name": "caa/django-galleries",
"id": "1cbe33d8e525d512661b4f41bcd603794931b821",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galleries/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10118"
}
],
"symlink_target": ""
}
|
import sys
import asyncio
import dscframework
import json
async def on_connect(cli):
print("subbing", flush=True)
await cli.subscribe("test", testsub)
# A GUID is created by the sensor that first broadcasts this data.
# TODO: implement it to track predictions, etc.
# TODO: maybe the node server should add the python client to PYTHONPATH if it is not already there.
async def testsub(head, data):
print("got sub data", flush=True)
print(json.dumps(head), flush=True)
print(data, flush=True)
async def main():
cli = dscframework.Client("ws://localhost:8080")
await cli.start(on_connect)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
{
"content_hash": "95173caa25de5b46f2d3be333767026b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 101,
"avg_line_length": 27.53846153846154,
"alnum_prop": 0.7067039106145251,
"repo_name": "dscplatform/dscframework",
"id": "5c8c311844e5cc3272fbe7bfb1b2b4e3efbfedcd",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "16664"
},
{
"name": "Python",
"bytes": "5030"
}
],
"symlink_target": ""
}
|
import datetime
import os.path
import subprocess
from django.db import models
from django.core.files.base import ContentFile
from django.utils.translation import ugettext as _
from django.utils.hashcompat import md5_constructor
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
try:
from cStringIO import StringIO
dir(StringIO) # Placate PyFlakes
except ImportError:
from StringIO import StringIO
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
from avatar import AVATAR_STORAGE_DIR, AVATAR_RESIZE_METHOD, \
AVATAR_MAX_AVATARS_PER_USER, AVATAR_THUMB_FORMAT, \
AVATAR_HASH_USERDIRNAMES, AVATAR_HASH_FILENAMES, \
AVATAR_THUMB_QUALITY, AVATAR_USE_IMAGEMAGICK, \
AVATAR_IMAGEMAGIC_CONVERT
def avatar_file_path(instance=None, filename=None, size=None, ext=None, new=False):
tmppath = [AVATAR_STORAGE_DIR]
if AVATAR_HASH_USERDIRNAMES:
tmp = md5_constructor(instance.user.username).hexdigest()
tmppath.extend([tmp[0], tmp[1], instance.user.username])
else:
tmppath.append(instance.user.username)
if not filename:
# Filename already stored in database
filename = instance.avatar.name
else:
filename = filename
# File doesn't exist yet
if AVATAR_HASH_FILENAMES:
(root, oldext) = os.path.splitext(filename)
filename = md5_constructor(smart_str(filename)).hexdigest()
filename = filename + oldext
if size:
tmppath.extend(['resized', str(size)])
tmppath.append(os.path.basename(filename))
filename = os.path.join(*tmppath)
# ext overrides current extension
(root, oldext) = os.path.splitext(filename)
if ext and ext != oldext:
filename = root + "." + ext
if new:
# file does not yet exist, avoid filename collision
if instance is not None:
filename = instance.avatar.storage.get_available_name(filename)
else:
pass # Not sure how to avoid collisions without storage
return filename
def find_extension(format):
format = format.lower()
if format == 'jpeg':
format = 'jpg'
return format
class Avatar(models.Model):
user = models.ForeignKey(User)
primary = models.BooleanField(default=False)
avatar = models.ImageField(max_length=1024, upload_to=avatar_file_path, blank=True)
date_uploaded = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return _(u'Avatar for %s') % self.user
def save(self, force_insert=False, force_update=False):
avatars = Avatar.objects.filter(user=self.user).exclude(id=self.id)
if AVATAR_MAX_AVATARS_PER_USER > 1:
if self.primary:
avatars = avatars.filter(primary=True)
avatars.update(primary=False)
else:
avatars.delete()
super(Avatar, self).save(force_insert, force_update)
def thumbnail_exists(self, size):
return self.avatar.storage.exists(self.avatar_name(size))
def create_thumbnail(self, size, quality=None):
try:
orig = self.avatar.storage.open(self.avatar.name, 'rb').read()
image = Image.open(StringIO(orig))
args = [AVATAR_IMAGEMAGIC_CONVERT, '-']
except IOError:
return # What should we do here? Render a "sorry, didn't work" img?
quality = quality or AVATAR_THUMB_QUALITY
(w, h) = image.size
if w != size or h != size:
if w > h:
diff = (w - h) / 2
if AVATAR_USE_IMAGEMAGICK:
args.extend(['-crop', '%dx%d+%d+%d' % (w, h, diff, 0)])
else:
image = image.crop((diff, 0, w - diff, h))
elif h > w:
diff = (h - w) / 2
if AVATAR_USE_IMAGEMAGICK:
args.extend(['-crop', '%dx%d+%d+%d' % (w, h, 0, diff)])
else:
image = image.crop((0, diff, w, h - diff))
if AVATAR_USE_IMAGEMAGICK:
args.extend(['-resize', '%dx%d' % (size, size)])
else:
image = image.resize((size, size), AVATAR_RESIZE_METHOD)
if image.mode != "RGB":
if AVATAR_USE_IMAGEMAGICK:
args.extend(['-colorspace', 'RGB'])
else:
image = image.convert("RGB")
thumb = StringIO()
if AVATAR_USE_IMAGEMAGICK:
args.extend(['-quality', str(quality), '%s:-' % AVATAR_THUMB_FORMAT])
proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
thumb_contents = proc.communicate(input=orig)[0]
thumb.write(thumb_contents)
else:
image.save(thumb, AVATAR_THUMB_FORMAT, quality=quality)
thumb_file = ContentFile(thumb.getvalue())
else:
thumb_file = ContentFile(orig)
thumb = self.avatar.storage.save(self.avatar_name(size, new=True), thumb_file)
def avatar_url(self, size):
return self.avatar.storage.url(self.avatar_name(size))
def avatar_name(self, size, new=False):
ext = find_extension(AVATAR_THUMB_FORMAT)
return avatar_file_path(
instance=self,
size=size,
ext=ext,
new=new,
)
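# Usage sketch (editor's illustration): given an Avatar instance, make sure a
# thumbnail of the requested size exists before asking for its URL.
#
#   if not avatar.thumbnail_exists(80):
#       avatar.create_thumbnail(80)
#   url = avatar.avatar_url(80)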
|
{
"content_hash": "99be53bbca71b55638ed43c0ff6e76eb",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 92,
"avg_line_length": 36.69078947368421,
"alnum_prop": 0.5854402008248162,
"repo_name": "dgouldin/django-avatar",
"id": "801b89996ea8c6461bf4f320135ac31da4729580",
"size": "5577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avatar/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "34120"
}
],
"symlink_target": ""
}
|
import sys
from sc_stats import WartsStats, basic, obj_type
def print_trace(flags, ips, rtts, meta):
print "traceroute from %s to %s" % (flags['srcaddr'], flags['dstaddr'])
for i, ip in enumerate(ips):
ttl = i+1
print "%2d %s" % (ttl, ip),
if ttl in rtts:
print " %2.3f ms" % (rtts[ttl]),
if ttl in meta:
for v in meta[ttl]:
print "%s" % (v),
print
def print_ping(flags, responses):
rtts = []
print "ping %s to %s: %d byte packets" % (flags['srcaddr'], flags['dstaddr'], flags['size'])
rcount = set()
for response in responses:
rcount.add(response['probeid'])
rtt = response['rtt']/1000.0
rtts.append(rtt)
print "%d bytes from %s, seq=%d ttl=%d time=%3.3f ms" % \
(response['replysize'], response['addr'], response['probeid'],
response['replyttl'], rtt)
print "--- %s ping statistics ---" % flags['dstaddr']
loss = 100.0 - (len(rcount) * 100.0 / flags['psent'])
print "%d packets transmitted, %d packets received, %d%% packet loss" % \
(flags['psent'], len(rcount), loss)
if len(rcount) > 0:
print "round-trip min/avg/max/stddev = %2.3f/%2.3f/%2.3f/%2.3f ms" % \
basic(rtts)
if __name__ == "__main__":
assert len(sys.argv) >= 2
target = None
if len(sys.argv) == 3: target = sys.argv[2]
w = WartsStats(sys.argv[1], verbose=False)
while True:
try:
(typ, data) = w.next()
if typ == None:
break
elif typ == obj_type['TRACE']:
(flags, ips, rtts, meta) = data
if target and target != flags['dstaddr']: continue
print_trace(flags, ips, rtts, meta)
elif typ == obj_type['PING']:
(flags, responses) = data
if target and target != flags['dstaddr']: continue
print_ping(flags, responses)
except Exception, e:
print "Flags:", flags
print "** Error:", e
sys.exit(-1)
|
{
"content_hash": "40ae2ea4fac512077eea01c9cc9f3022",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 94,
"avg_line_length": 32.55172413793103,
"alnum_prop": 0.5715042372881356,
"repo_name": "cmand/scamper",
"id": "4a457532e96fdfe4c645b2b0906ab9de9fb841ee",
"size": "2046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sc_warts2text.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "62676"
}
],
"symlink_target": ""
}
|
from election_office_measure.models import CandidateCampaign
from organization.models import Organization
from position.models import PositionEntered
from rest_framework import serializers
class CandidateCampaignSerializer(serializers.ModelSerializer):
class Meta:
model = CandidateCampaign
fields = ('id_we_vote', 'candidate_name', 'candidate_url', 'email', 'facebook_url', 'google_civic_election_id',
'google_plus_url', 'order_on_ballot', 'party', 'phone', 'photo_url', 'twitter_url', 'youtube_url')
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = ('id_we_vote', 'name', 'url')
class PositionSerializer(serializers.ModelSerializer):
class Meta:
model = PositionEntered
fields = ('id_we_vote', 'organization_id_we_vote', 'candidate_campaign_id_we_vote',
'measure_campaign_id_we_vote', 'date_entered', 'election_id', 'stance', 'more_info_url',
'statement_text', 'statement_html')
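# Usage sketch (editor's illustration): standard Django REST Framework usage,
# e.g. inside an API view; 'candidates' is a placeholder queryset.
#
#   serializer = CandidateCampaignSerializer(candidates, many=True)
#   return Response(serializer.data)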
|
{
"content_hash": "3db663ccaaee2c3e796a5f40d4ca376f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 119,
"avg_line_length": 42.12,
"alnum_prop": 0.6837606837606838,
"repo_name": "wevoteeducation/WeVoteBase",
"id": "a24389fbdb87e1429de899729554e9aa5017f11d",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "import_export/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53626"
},
{
"name": "HTML",
"bytes": "62040"
},
{
"name": "JavaScript",
"bytes": "70636"
},
{
"name": "Python",
"bytes": "404334"
}
],
"symlink_target": ""
}
|
import importlib
from sacred.utils import modules_exist
from sacred.utils import get_package_version, parse_version
def optional_import(*package_names):
try:
packages = [importlib.import_module(pn) for pn in package_names]
return True, packages[0]
except ImportError:
return False, None
def get_tensorflow():
# Ensures backward and forward compatibility with TensorFlow 1 and 2.
if get_package_version("tensorflow") < parse_version("1.13.1"):
import warnings
warnings.warn(
"Use of TensorFlow 1.12 and older is deprecated. "
"Use Tensorflow 1.13 or newer instead.",
DeprecationWarning,
)
import tensorflow as tf
else:
import tensorflow.compat.v1 as tf
return tf
# Get libc in a cross-platform way and use it to also flush the c stdio buffers
# credit to J.F. Sebastians SO answer from here:
# http://stackoverflow.com/a/22434262/1388435
try:
import ctypes
from ctypes.util import find_library
except ImportError:
libc = None
else:
try:
libc = ctypes.cdll.msvcrt # Windows
except OSError:
libc = ctypes.cdll.LoadLibrary(find_library("c"))
has_numpy, np = optional_import("numpy")
has_yaml, yaml = optional_import("yaml")
has_pandas, pandas = optional_import("pandas")
has_sqlalchemy = modules_exist("sqlalchemy")
has_mako = modules_exist("mako")
has_tinydb = modules_exist("tinydb", "tinydb_serialization", "hashfs")
has_tensorflow = modules_exist("tensorflow")
|
{
"content_hash": "533b9ca8db00c079655e99c8c3c66f8d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 29.46153846153846,
"alnum_prop": 0.683420365535248,
"repo_name": "IDSIA/sacred",
"id": "3ca6e9aa92117e2b497a669c38676d861d6877c2",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sacred/optional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1603"
},
{
"name": "Python",
"bytes": "542648"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
}
|
"""Module containing class for AWS's Glue Crawler."""
import json
from typing import Any, Dict, Optional, Tuple
from absl import flags
from perfkitbenchmarker import data_discovery_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
class CrawlNotCompletedError(Exception):
"""Used to signal a crawl is still running."""
class CrawlFailedError(Exception):
"""Used to signal a crawl has failed."""
class AwsGlueCrawler(data_discovery_service.BaseDataDiscoveryService):
"""AWS Glue Crawler Resource Class.
Attributes:
db_name: Name of the Glue database that will be provisioned.
crawler_name: Name of the crawler that will be provisioned.
role: Role the crawler will use. Refer to aws_glue_crawler_role flag for
more info.
sample_size: How many files will be crawled in each leaf directory. Refer to
aws_glue_crawler_sample_size flag for more info.
"""
CLOUD = providers.AWS
SERVICE_TYPE = 'glue'
READY = 'READY'
FAILED = 'FAILED'
CRAWL_TIMEOUT = 21600
CRAWL_POLL_INTERVAL = 5
def __init__(self):
super().__init__()
self.db_name = f'pkb-db-{FLAGS.run_uri}'
self.crawler_name = f'pkb-crawler-{FLAGS.run_uri}'
self.role = FLAGS.aws_glue_crawler_role
self.sample_size = FLAGS.aws_glue_crawler_sample_size
def _Create(self) -> None:
# creating database
database_input = {
'Name': self.db_name,
'Description': '\n'.join(
f'{k}={v}' for k, v in util.MakeDefaultTags().items()),
}
cmd = util.AWS_PREFIX + [
'glue',
'create-database',
'--database-input', json.dumps(database_input),
f'--region={self.region}',
]
vm_util.IssueCommand(cmd)
targets = {'S3Targets': [{'Path': self.data_discovery_path}]}
if self.sample_size is not None:
targets['S3Targets'][0]['SampleSize'] = self.sample_size
# creating crawler
cmd = util.AWS_PREFIX + [
'glue',
'create-crawler',
'--name', self.crawler_name,
'--role', self.role,
'--database-name', self.db_name,
'--targets', json.dumps(targets),
'--region', self.region,
'--tags', ','.join(
f'{k}={v}' for k, v in util.MakeDefaultTags().items()),
]
vm_util.IssueCommand(cmd)
def _Exists(self) -> bool:
return self._DbExists() and self._CrawlerExists()
def _IsReady(self, raise_on_crawl_failure=False) -> bool:
stdout, _, _ = self._GetCrawler()
data = json.loads(stdout)
if (data['Crawler'].get('LastCrawl', {}).get('Status') == self.FAILED and
raise_on_crawl_failure):
raise CrawlFailedError(
data['Crawler'].get('LastCrawl', {}).get('ErrorMessage', ''))
return data['Crawler']['State'] == self.READY
def _Delete(self) -> None:
# deleting database
cmd = util.AWS_PREFIX + [
'glue',
'delete-database',
'--name', self.db_name,
'--region', self.region,
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
# deleting crawler
cmd = util.AWS_PREFIX + [
'glue',
'delete-crawler',
'--name', self.crawler_name,
'--region', self.region,
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
  def _IsDeleting(self) -> bool:
    crawler_exists = self._CrawlerExists()
    db_exists = self._DbExists()
    if db_exists is None or crawler_exists is None:
      return True
    return db_exists or crawler_exists
def DiscoverData(self) -> float:
"""Runs the AWS Glue Crawler. Returns the time elapsed in secs."""
cmd = util.AWS_PREFIX + [
'glue',
'start-crawler',
'--name', self.crawler_name,
'--region', self.region,
]
vm_util.IssueCommand(cmd)
self._WaitUntilCrawlerReady()
cmd = util.AWS_PREFIX + [
'glue',
'get-crawler-metrics',
'--crawler-name-list', self.crawler_name,
'--region', self.region,
]
output, _, _ = vm_util.IssueCommand(cmd)
data = json.loads(output)
assert (isinstance(data['CrawlerMetricsList'], list) and
len(data['CrawlerMetricsList']) == 1)
return data['CrawlerMetricsList'][0]['LastRuntimeSeconds']
def GetMetadata(self) -> Dict[str, Any]:
"""Return a dictionary of the metadata for this service."""
metadata = super().GetMetadata()
metadata.update(
aws_glue_crawler_sample_size=self.sample_size,
aws_glue_db_name=self.db_name,
aws_glue_crawler_name=self.crawler_name,
)
return metadata
@vm_util.Retry(
timeout=CRAWL_TIMEOUT,
poll_interval=CRAWL_POLL_INTERVAL,
fuzz=0,
retryable_exceptions=CrawlNotCompletedError,)
def _WaitUntilCrawlerReady(self):
if not self._IsReady(raise_on_crawl_failure=True):
raise CrawlNotCompletedError(
f'Crawler {self.crawler_name} still running.')
def _DbExists(self) -> Optional[bool]:
"""Whether the database exists or not.
It might return None if the API call failed with an unknown error.
Returns:
A bool or None.
"""
cmd = util.AWS_PREFIX + [
'glue',
'get-database',
'--name', self.db_name,
'--region', self.region,
]
_, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if not retcode:
return True
return False if 'EntityNotFoundException' in stderr else None
def _CrawlerExists(self) -> Optional[bool]:
"""Whether the crawler exists or not.
It might return None if the API call failed with an unknown error.
Returns:
A bool or None.
"""
_, stderr, retcode = self._GetCrawler(raise_on_failure=False)
if not retcode:
return True
return False if 'EntityNotFoundException' in stderr else None
def _GetCrawler(self, raise_on_failure=True) -> Tuple[str, str, int]:
"""Calls the AWS CLI to retrieve a crawler."""
cmd = util.AWS_PREFIX + [
'glue',
'get-crawler',
'--name', self.crawler_name,
'--region', self.region,
]
return vm_util.IssueCommand(cmd, raise_on_failure=raise_on_failure)
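# Illustrative lifecycle sketch, not part of the original module: in
# PerfKitBenchmarker the framework normally drives the resource methods, so the
# hypothetical manual sequence below is only meant to show the intended order.
#
#     crawler = AwsGlueCrawler()
#     crawler._Create()                 # provision Glue database and crawler
#     elapsed = crawler.DiscoverData()  # start the crawl and wait until READY
#     metadata = crawler.GetMetadata()
#     crawler._Delete()                 # remove database and crawler again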
|
{
"content_hash": "d7caa77e0572a2ad4d088229ca74d443",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 80,
"avg_line_length": 30.465686274509803,
"alnum_prop": 0.6275140788415124,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "4ebee8041b28358fd8eb2eb4615bbd60239123e2",
"size": "6215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/providers/aws/aws_glue_crawler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
}
|
X, y = preprocess(data, shuffle=False, n_samples=1000, normalization=None)
from sklearn.manifold import LocallyLinearEmbedding
lle = LocallyLinearEmbedding(n_neighbors=15,
n_components=3, method='modified')
X_proj = lle.fit_transform(X)
three_component_plot(X_proj[:, 0], X_proj[:, 1], X_proj[:, 2], y, labels, trim_outliers=True)
|
{
"content_hash": "7ebc7846f2e072451eb2ab81bf183647",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 93,
"avg_line_length": 45.25,
"alnum_prop": 0.6823204419889503,
"repo_name": "vtesin/sklearn_tutorial",
"id": "616693ca5a8ca773cd230a088e1e483b320221f3",
"size": "372",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "doc/notebooks/soln/03-02.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1121"
},
{
"name": "Python",
"bytes": "37675"
}
],
"symlink_target": ""
}
|
from httplib import HTTPSConnection, HTTPException
from urllib import urlencode
from ssl import SSLError
import sickbeard
from sickbeard import logger, common
class PushalotNotifier:
def test_notify(self, pushalot_authorizationtoken):
return self._sendPushalot(pushalot_authorizationtoken, event="Test", message="Testing Pushalot settings from Sick Beard", force=True)
def notify_snatch(self, ep_name):
if sickbeard.PUSHALOT_NOTIFY_ONSNATCH:
self._sendPushalot(pushalot_authorizationtoken=None, event=common.notifyStrings[common.NOTIFY_SNATCH], message=ep_name)
def notify_download(self, ep_name):
if sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD:
self._sendPushalot(pushalot_authorizationtoken=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD], message=ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD:
self._sendPushalot(pushalot_authorizationtoken=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], message=ep_name + ": " + lang)
def _sendPushalot(self, pushalot_authorizationtoken=None, event=None, message=None, force=False):
if not sickbeard.USE_PUSHALOT and not force:
return False
        if pushalot_authorizationtoken is None:
pushalot_authorizationtoken = sickbeard.PUSHALOT_AUTHORIZATIONTOKEN
logger.log(u"Pushalot event: " + event, logger.DEBUG)
logger.log(u"Pushalot message: " + message, logger.DEBUG)
logger.log(u"Pushalot api: " + pushalot_authorizationtoken, logger.DEBUG)
http_handler = HTTPSConnection("pushalot.com")
data = {'AuthorizationToken': pushalot_authorizationtoken,
'Title': event.encode('utf-8'),
'Body': message.encode('utf-8') }
try:
http_handler.request("POST",
"/api/sendmessage",
headers = {'Content-type': "application/x-www-form-urlencoded"},
body = urlencode(data))
except (SSLError, HTTPException):
logger.log(u"Pushalot notification failed.", logger.ERROR)
return False
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
logger.log(u"Pushalot notifications sent.", logger.DEBUG)
return True
elif request_status == 410:
logger.log(u"Pushalot auth failed: %s" % response.reason, logger.ERROR)
return False
else:
logger.log(u"Pushalot notification failed.", logger.ERROR)
return False
notifier = PushalotNotifier
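# Illustrative usage sketch, not part of the original module: Sick Beard's
# notification framework instantiates the class above itself, so a manual smoke
# test would roughly be the call below with a real Pushalot authorization token
# (the token string here is a placeholder).
#
#     PushalotNotifier().test_notify("your-authorization-token")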
|
{
"content_hash": "69916f09d591bd1308f6696444e05979",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 156,
"avg_line_length": 44.921875,
"alnum_prop": 0.6281739130434782,
"repo_name": "Branlala/docker-sickbeardfr",
"id": "3d3d634d16ed24ff4463f151d7c3af284049bfff",
"size": "3698",
"binary": false,
"copies": "46",
"ref": "refs/heads/master",
"path": "sickbeard/sickbeard/notifiers/pushalot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83278"
},
{
"name": "CSS",
"bytes": "155616"
},
{
"name": "JavaScript",
"bytes": "248414"
},
{
"name": "Python",
"bytes": "8146521"
},
{
"name": "Ruby",
"bytes": "2461"
},
{
"name": "Shell",
"bytes": "8791"
}
],
"symlink_target": ""
}
|
import time
import png
import numpy as np
from math import sqrt
def save_fig(pixels=[]):
pixels = np.array(pixels)
pixels = pixels.reshape(int(sqrt(len(pixels))), int(sqrt(len(pixels))))
pixels = pixels.tolist()
    s = [[int(v) for v in row] for row in pixels]
name = str(time.time()) + '.png'
f = open('generated/' + name, 'wb')
w = png.Writer(len(s[0]), len(s), greyscale=True, bitdepth=1)
w.write(f, s)
f.close()
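# Illustrative usage sketch, not part of the original module: save_fig expects a
# flat sequence of 0/1 pixel values whose length is a perfect square (4 -> 2x2
# below) and writes a 1-bit greyscale PNG named after the current timestamp into
# the generated/ directory, which must already exist.
#
#     save_fig([0, 1, 1, 0])   # writes generated/<timestamp>.png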
|
{
"content_hash": "f6e7e51a8603d85be27a9f79b5d58be7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 27.75,
"alnum_prop": 0.6148648648648649,
"repo_name": "ZebTech/geneticExperiment",
"id": "782b45a3e073f64303258bb0a0a7e45bfcd12608",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10406"
}
],
"symlink_target": ""
}
|
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.project.routers.extensions.routerrules\
import rulemanager
from horizon import tables
LOG = logging.getLogger(__name__)
class AddRouterRule(tables.LinkAction):
name = "create"
verbose_name = _("Add Router Rule")
url = "horizon:project:routers:addrouterrule"
classes = ("ajax-modal", "btn-create")
def get_link_url(self, datum=None):
router_id = self.table.kwargs['router_id']
return reverse(self.url, args=(router_id,))
class RemoveRouterRule(tables.DeleteAction):
data_type_singular = _("Router Rule")
data_type_plural = _("Router Rules")
failure_url = 'horizon:project:routers:detail'
def delete(self, request, obj_id):
router_id = self.table.kwargs['router_id']
rulemanager.remove_rules(request, [obj_id],
router_id=router_id)
class RouterRulesTable(tables.DataTable):
source = tables.Column("source", verbose_name=_("Source CIDR"))
destination = tables.Column("destination",
verbose_name=_("Destination CIDR"))
action = tables.Column("action", verbose_name=_("Action"))
nexthops = tables.Column("nexthops", verbose_name=_("Next Hops"))
def get_object_display(self, rule):
return "(%(action)s) %(source)s -> %(destination)s" % rule
class Meta:
name = "routerrules"
verbose_name = _("Router Rules")
table_actions = (AddRouterRule, RemoveRouterRule)
row_actions = (RemoveRouterRule, )
|
{
"content_hash": "39a98bf408de79bc768330beef161bc5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 33.06,
"alnum_prop": 0.6563823351482153,
"repo_name": "hep-gc/glint-horizon",
"id": "903e5b8d4152890b7534e8846380183ac7108ae6",
"size": "2317",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/routers/extensions/routerrules/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "227769"
},
{
"name": "HTML",
"bytes": "329304"
},
{
"name": "JavaScript",
"bytes": "707335"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "3247421"
},
{
"name": "Shell",
"bytes": "17821"
}
],
"symlink_target": ""
}
|
import base64
import json
import logging
import os
from collections import defaultdict
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from django.templatetags.static import static
from django.views.decorators.csrf import requires_csrf_token
from django_filters import rest_framework as filters
from jsonview.decorators import json_view
from jsonview.exceptions import BadRequest
from rest_framework import status as http_status
from rest_framework import generics
from rest_framework import permissions
from orchestra.core.errors import IllegalTaskSubmission
from orchestra.core.errors import TaskAssignmentError
from orchestra.core.errors import TaskStatusError
from orchestra.core.errors import TimerError
from orchestra.filters import TimeEntryFilter
from orchestra.models import Iteration
from orchestra.models import Step
from orchestra.models import Task
from orchestra.models import TaskAssignment
from orchestra.models import TimeEntry
from orchestra.models import Worker
from orchestra.project_api.serializers import TaskTimerSerializer
from orchestra.project_api.serializers import TimeEntrySerializer
from orchestra.utils import time_tracking
from orchestra.utils.load_json import load_encoded_json
from orchestra.utils.s3 import upload_editor_image
from orchestra.utils.task_lifecycle import get_task_overview_for_worker
from orchestra.utils.task_lifecycle import save_task
from orchestra.utils.task_lifecycle import submit_task
from orchestra.utils.task_lifecycle import tasks_assigned_to_worker
from orchestra.utils.task_lifecycle import worker_has_reviewer_status
from orchestra.utils.common_helpers import IsAssociatedWorker
logger = logging.getLogger(__name__)
UserModel = get_user_model()
# NOTE(joshblum): whitenoise is a bit over eager and tries to replace our
# {{js/css}} with a static resource, so we don't put these script tags on the
# page
def _get_script_tag(script):
return '<script src="{}" type="text/javascript"></script>'.format(script)
def _get_style_tag(style):
return '<link href="{}" rel="stylesheet">'.format(style)
@login_required
def index(request):
javascript_includes = []
stylesheet_includes = []
orchestra_arguments = {
'angular_modules': [],
'angular_directives': defaultdict(lambda: defaultdict(lambda: {})),
'csrf_cookie_name': settings.CSRF_COOKIE_NAME,
}
for step in Step.objects.filter(is_human=True):
# Preserve js and stylesheet order while removing duplicates
for js in step.user_interface.get('javascript_includes', []):
static_js = _get_script_tag(static(js))
if static_js not in javascript_includes:
javascript_includes.append(static_js)
for style in step.user_interface.get('stylesheet_includes', []):
static_style = _get_style_tag(static(style))
if static_style not in stylesheet_includes:
stylesheet_includes.append(static_style)
if step.user_interface.get('angular_module'):
orchestra_arguments['angular_modules'].append(
step.user_interface['angular_module'])
if step.user_interface.get('angular_directive'):
orchestra_arguments['angular_directives'][
step.workflow_version.workflow.slug][
step.workflow_version.slug][step.slug] = (
step.user_interface['angular_directive'])
return render(request, 'orchestra/index.html', {
'javascript_includes': javascript_includes,
'stylesheet_includes': stylesheet_includes,
'orchestra_arguments': json.dumps(orchestra_arguments)})
@login_required
def newindex(request):
return render(request, 'orchestra/newindex.html')
@json_view
@login_required
def dashboard_tasks(request):
worker = Worker.objects.get(user=request.user)
tasks = tasks_assigned_to_worker(worker)
return {'tasks': tasks,
'reviewerStatus': worker_has_reviewer_status(worker)}
@json_view
@login_required
def upload_image(request):
upload_data = load_encoded_json(request.body)
image_type = upload_data['image_type']
image_data = base64.b64decode(upload_data['image_data'])
prefix = upload_data.get('prefix') or ''
if settings.PRODUCTION:
prefix = os.path.join('production', prefix)
else:
prefix = os.path.join('development', prefix)
return {'url': upload_editor_image(image_data, image_type, prefix)}
@json_view
@login_required
def task_assignment_information(request):
try:
worker = Worker.objects.get(user=request.user)
return get_task_overview_for_worker(
load_encoded_json(request.body)['task_id'],
worker)
except TaskAssignmentError as e:
raise BadRequest(e)
except Task.DoesNotExist as e:
raise BadRequest(e)
@json_view
@login_required
def save_task_assignment(request):
assignment_information = load_encoded_json(request.body)
worker = Worker.objects.get(user=request.user)
try:
save_task(assignment_information['task_id'],
assignment_information['task_data'],
worker)
return {}
except Task.DoesNotExist:
raise BadRequest('No task for given id')
except TaskAssignmentError as e:
raise BadRequest(e)
@json_view
@login_required
def submit_task_assignment(request):
assignment_information = load_encoded_json(request.body)
worker = Worker.objects.get(user=request.user)
command_type = assignment_information['command_type']
if command_type in ('submit', 'accept'):
iteration_status = Iteration.Status.REQUESTED_REVIEW
elif command_type == 'reject':
iteration_status = Iteration.Status.PROVIDED_REVIEW
else:
raise BadRequest('Illegal command')
try:
submit_task(assignment_information['task_id'],
assignment_information['task_data'],
iteration_status,
worker)
return {}
except TaskStatusError:
raise BadRequest('Task already completed')
except Task.DoesNotExist:
raise BadRequest('No task for given id')
except IllegalTaskSubmission as e:
raise BadRequest(e)
except TaskAssignmentError as e:
raise BadRequest(e)
@json_view
@login_required
def start_timer(request):
worker = Worker.objects.get(user=request.user)
try:
if request.method == 'POST':
time_entry_data = load_encoded_json(request.body)
assignment_id = None
if 'assignment' in time_entry_data:
assignment_id = time_entry_data['assignment']
timer = time_tracking.start_timer(worker,
assignment_id=assignment_id)
serializer = TaskTimerSerializer(timer)
return serializer.data
except TaskAssignment.DoesNotExist:
raise BadRequest('Worker is not assigned to this task id.')
except TimerError as e:
raise BadRequest(e)
except Exception as e:
logger.exception(e)
raise e
@json_view
@login_required
def stop_timer(request):
worker = Worker.objects.get(user=request.user)
try:
if request.method == 'POST':
time_entry = time_tracking.stop_timer(worker)
serializer = TimeEntrySerializer(time_entry)
return serializer.data
except TimerError as e:
raise BadRequest(e)
except Exception as e:
logger.exception(e)
raise e
@json_view
@login_required
def get_timer(request):
worker = Worker.objects.get(user=request.user)
try:
if request.method == 'GET':
timer = time_tracking.get_timer_object(worker)
time_worked = time_tracking.get_timer_current_duration(worker)
data = TaskTimerSerializer(timer).data
if time_worked:
data['time_worked'] = str(time_worked)
return data
except Exception as e:
logger.error(e, exc_info=True)
raise e
@json_view
@login_required
def update_timer(request):
worker = Worker.objects.get(user=request.user)
try:
if request.method == 'POST':
data = load_encoded_json(request.body)
time_tracking.update_timer(
worker, data.get('description'), data.get('assignment'))
except Exception as e:
logger.error(e, exc_info=True)
raise e
# A simple status endpoint for things like health checks, etc.
def status(request):
return HttpResponse('OK')
def error_handler(request, error_code, context):
context.update({
'contact_us': settings.ORCHESTRA_NOTIFICATIONS_FROM_EMAIL,
'err_title': error_code,
})
return render(request,
'orchestra/error.html',
context=context,
status=error_code)
@requires_csrf_token
def bad_request(request, exception):
error_code = http_status.HTTP_400_BAD_REQUEST
return error_handler(request, error_code, context={
'page_title': '400 Bad Request',
})
@requires_csrf_token
def forbidden(request, exception):
error_code = http_status.HTTP_403_FORBIDDEN
return error_handler(request, error_code, context={
'page_title': '403 Forbidden',
})
@requires_csrf_token
def not_found(request, exception):
error_code = http_status.HTTP_404_NOT_FOUND
return error_handler(request, error_code, context={
'page_title': '404 Not Found',
})
@requires_csrf_token
def internal_server_error(request):
error_code = http_status.HTTP_500_INTERNAL_SERVER_ERROR
return error_handler(request, error_code, context={
'page_title': '500 Internal Server Error',
})
class TimeEntryList(generics.ListCreateAPIView):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = TimeEntrySerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = TimeEntryFilter
def get_queryset(self):
"""
Return time entries for current user, filtering on assignment id
if provided.
"""
# TODO(lydia): Add time filter to queryset.
worker = Worker.objects.get(user=self.request.user)
queryset = TimeEntry.objects.filter(worker=worker)
assignment_id = self.request.query_params.get('assignment', None)
if assignment_id is not None:
queryset = queryset.filter(assignment__id=assignment_id)
return queryset
def perform_create(self, serializer):
"""
Overwrite perform_create so that user can only create time entries
for him or herself.
"""
# TODO(lydia): Is there a way to prevent workers from creating
# time entries for completed TaskAssignments?
worker = Worker.objects.get(user=self.request.user)
serializer.save(worker=worker)
class TimeEntryDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticated, IsAssociatedWorker)
queryset = TimeEntry.objects.all()
serializer_class = TimeEntrySerializer
|
{
"content_hash": "a43e610c56a05ba349ca4762c2b93a41",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 77,
"avg_line_length": 33.8562874251497,
"alnum_prop": 0.6818181818181818,
"repo_name": "b12io/orchestra",
"id": "89e4b15e24bb7ab9708b009d08f4dc365fcb4608",
"size": "11308",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "orchestra/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50496"
},
{
"name": "HTML",
"bytes": "101830"
},
{
"name": "JavaScript",
"bytes": "353673"
},
{
"name": "Makefile",
"bytes": "1234"
},
{
"name": "Python",
"bytes": "975395"
},
{
"name": "SCSS",
"bytes": "32860"
},
{
"name": "Shell",
"bytes": "26"
},
{
"name": "TypeScript",
"bytes": "20983"
}
],
"symlink_target": ""
}
|
import numpy as np
import torch
from PIL import Image
from argparse import ArgumentParser
from torch.optim import SGD, Adam
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize
from torchvision.transforms import ToTensor, ToPILImage
from piwise.dataset import VOC12
from piwise.network import FCN8, FCN16, FCN32, UNet, PSPNet, SegNet
from piwise.criterion import CrossEntropyLoss2d
from piwise.transform import Relabel, ToLabel, Colorize
from piwise.visualize import Dashboard
NUM_CHANNELS = 3
NUM_CLASSES = 22
color_transform = Colorize()
image_transform = ToPILImage()
input_transform = Compose([
CenterCrop(256),
ToTensor(),
Normalize([.485, .456, .406], [.229, .224, .225]),
])
target_transform = Compose([
CenterCrop(256),
ToLabel(),
Relabel(255, 21),
])
def train(args, model):
model.train()
weight = torch.ones(22)
weight[0] = 0
loader = DataLoader(VOC12(args.datadir, input_transform, target_transform),
num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True)
if args.cuda:
criterion = CrossEntropyLoss2d(weight.cuda())
else:
criterion = CrossEntropyLoss2d(weight)
optimizer = Adam(model.parameters())
if args.model.startswith('FCN'):
optimizer = SGD(model.parameters(), 1e-4, .9, 2e-5)
if args.model.startswith('PSP'):
optimizer = SGD(model.parameters(), 1e-2, .9, 1e-4)
if args.model.startswith('Seg'):
optimizer = SGD(model.parameters(), 1e-3, .9)
if args.steps_plot > 0:
board = Dashboard(args.port)
for epoch in range(1, args.num_epochs+1):
epoch_loss = []
for step, (images, labels) in enumerate(loader):
if args.cuda:
images = images.cuda()
labels = labels.cuda()
inputs = Variable(images)
targets = Variable(labels)
outputs = model(inputs)
optimizer.zero_grad()
loss = criterion(outputs, targets[:, 0])
loss.backward()
optimizer.step()
epoch_loss.append(loss.data[0])
if args.steps_plot > 0 and step % args.steps_plot == 0:
image = inputs[0].cpu().data
image[0] = image[0] * .229 + .485
image[1] = image[1] * .224 + .456
image[2] = image[2] * .225 + .406
board.image(image,
f'input (epoch: {epoch}, step: {step})')
board.image(color_transform(outputs[0].cpu().max(0)[1].data),
f'output (epoch: {epoch}, step: {step})')
board.image(color_transform(targets[0].cpu().data),
f'target (epoch: {epoch}, step: {step})')
if args.steps_loss > 0 and step % args.steps_loss == 0:
average = sum(epoch_loss) / len(epoch_loss)
print(f'loss: {average} (epoch: {epoch}, step: {step})')
if args.steps_save > 0 and step % args.steps_save == 0:
filename = f'{args.model}-{epoch:03}-{step:04}.pth'
torch.save(model.state_dict(), filename)
print(f'save: {filename} (epoch: {epoch}, step: {step})')
def evaluate(args, model):
model.eval()
image = input_transform(Image.open(args.image))
label = model(Variable(image, volatile=True).unsqueeze(0))
label = color_transform(label[0].data.max(0)[1])
image_transform(label).save(args.label)
def main(args):
Net = None
if args.model == 'fcn8':
Net = FCN8
if args.model == 'fcn16':
Net = FCN16
    if args.model == 'fcn32':
        Net = FCN32
if args.model == 'unet':
Net = UNet
if args.model == 'pspnet':
Net = PSPNet
if args.model == 'segnet':
Net = SegNet
assert Net is not None, f'model {args.model} not available'
model = Net(NUM_CLASSES)
if args.cuda:
model = model.cuda()
if args.state:
try:
model.load_state_dict(torch.load(args.state))
except AssertionError:
model.load_state_dict(torch.load(args.state,
map_location=lambda storage, loc: storage))
if args.mode == 'eval':
evaluate(args, model)
if args.mode == 'train':
train(args, model)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--model', required=True)
parser.add_argument('--state')
subparsers = parser.add_subparsers(dest='mode')
subparsers.required = True
parser_eval = subparsers.add_parser('eval')
parser_eval.add_argument('image')
parser_eval.add_argument('label')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--port', type=int, default=80)
parser_train.add_argument('--datadir', required=True)
parser_train.add_argument('--num-epochs', type=int, default=32)
parser_train.add_argument('--num-workers', type=int, default=4)
parser_train.add_argument('--batch-size', type=int, default=1)
parser_train.add_argument('--steps-loss', type=int, default=50)
parser_train.add_argument('--steps-plot', type=int, default=0)
parser_train.add_argument('--steps-save', type=int, default=500)
main(parser.parse_args())
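# Illustrative invocations, not part of the original script; the data directory,
# checkpoint name and image paths below are placeholders. Global options such as
# --model and --state must precede the train/eval subcommand.
#
#     python main.py --cuda --model segnet train --datadir data --num-epochs 32
#     python main.py --model segnet --state segnet-032-0500.pth eval input.jpg output.png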
|
{
"content_hash": "820f2ff607fea1196314cfbd82860e45",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 79,
"avg_line_length": 33.355828220858896,
"alnum_prop": 0.6051131138495494,
"repo_name": "bodokaiser/piwise",
"id": "2accfabc8a11170b3bb35e7ae06e996db626f682",
"size": "5437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21272"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def varimp_test(ip, port):
train = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
# Run GBM
my_gbm = h2o.gbm(y=train["class"], x=train[1:4], ntrees=50, learn_rate=0.1, distribution="multinomial")
should_be_none = my_gbm.varimp()
assert should_be_none is None, "expected varimp to return None, but returned {0}".format(should_be_none)
should_be_list = my_gbm.varimp(return_list=True)
assert len(should_be_list) == 3, "expected varimp list to contain 3 entries, but it has " \
"{0}".format(len(should_be_list))
assert len(should_be_list[0]) == 4, "expected varimp entry to contain 4 elements (variable, relative_importance, " \
"scaled_importance, percentage), but it has {0}".format(len(should_be_list[0]))
if __name__ == "__main__":
tests.run_test(sys.argv, varimp_test)
|
{
"content_hash": "5bf08484722901ed9b11e6a2751c7a5e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 120,
"avg_line_length": 42.47826086956522,
"alnum_prop": 0.6151484135107472,
"repo_name": "bospetersen/h2o-3",
"id": "5a67a866754a797d127c1d480c9a3b2eb7ed983a",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_misc/pyunit_varimp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8914"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "146874"
},
{
"name": "Java",
"bytes": "5441396"
},
{
"name": "JavaScript",
"bytes": "88331"
},
{
"name": "Makefile",
"bytes": "31513"
},
{
"name": "Python",
"bytes": "2021301"
},
{
"name": "R",
"bytes": "1829960"
},
{
"name": "Rebol",
"bytes": "3997"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "44718"
},
{
"name": "TeX",
"bytes": "470617"
}
],
"symlink_target": ""
}
|
from typing import Optional, Tuple, Type
from sortedcontainers import SortedKeyList
from types import TracebackType
import asyncio
class _AcquireManager:
def __init__(self, ws: 'WeightedSemaphore', n: int):
self._ws = ws
self._n = n
async def __aenter__(self) -> '_AcquireManager':
await self._ws.acquire(self._n)
return self
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self._ws.release(self._n)
class WeightedSemaphore:
def __init__(self, value: int):
self.max = value
self.value = value
self.events: SortedKeyList[Tuple[int, asyncio.Event]] = SortedKeyList(key=lambda x: x[0])
    def release(self, n: int) -> None:
        self.value += n
        while self.events:
            needed, event = self.events[0]
            if self.value >= needed:
                self.events.pop(0)
                self.value -= needed
                event.set()
            else:
                break
def acquire_manager(self, n: int) -> _AcquireManager:
return _AcquireManager(self, n)
    async def acquire(self, n: int) -> None:
assert n <= self.max
if self.value >= n:
self.value -= n
return
event = asyncio.Event()
self.events.add((n, event))
await event.wait()
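# Illustrative usage sketch, not part of the original module: a weighted
# semaphore lets a task reserve several units at once, and acquire_manager wraps
# acquire/release in an async context manager. The function name below is
# hypothetical and is never invoked by the library itself.
async def _example_usage() -> None:
    ws = WeightedSemaphore(10)
    async with ws.acquire_manager(4):
        ...  # 6 of the 10 units remain available to other tasks here
    # the 4 units are returned automatically when the block exits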
|
{
"content_hash": "7a28afb65819abb0e0a1634c35980c40",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 97,
"avg_line_length": 28.862745098039216,
"alnum_prop": 0.5441576086956522,
"repo_name": "hail-is/hail",
"id": "d899c0779dd04e73c440abd6c42da3aaddbdd170",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hail/python/hailtop/aiotools/weighted_semaphore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core import mail
from django.test import TestCase
from django.test.utils import captured_stdout
from django.test.utils import override_settings
from smsish.sms import send_sms
from smsish.sms import send_mass_sms
from smsish.sms.message import SMSMessage
VALID_FROM_NUMBER = settings.TWILIO_MAGIC_FROM_NUMBER
VALID_TO_NUMBER = settings.TWILIO_MAGIC_FROM_NUMBER
@override_settings(SMS_BACKEND='smsish.sms.backends.dummy.SMSBackend')
class SMSMessageTestCase(TestCase):
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_create_with_subject_not_allowed(self):
with self.assertRaises(TypeError):
SMSMessage("Subject", "Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
def test_recipients(self):
sms = self.sms
recipients = sms.recipients()
self.assertEqual(recipients, [VALID_TO_NUMBER])
def test_send(self):
sms = self.sms
numSent = sms.send()
# Test that one message has been sent.
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 0)
def test_send_sms(self):
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 0)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
# Test that no message has been sent.
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
messageSpec = ("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
datatuple = (messageSpec for _ in range(10))
numSent = send_mass_sms(datatuple)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 0)
@override_settings(SMS_BACKEND='smsish.sms.backends.console.SMSBackend')
class SendSMSUsingConsoleTestCase(TestCase):
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_send(self):
with captured_stdout() as stdout:
sms = self.sms
numSent = sms.send()
self.assertEqual(numSent, 1)
output = stdout.getvalue()
self.assertTrue("Subject: None" in output)
self.assertTrue("From: +15005550006" in output)
self.assertTrue("To: +15005550006" in output)
self.assertTrue("Body" in output)
with captured_stdout() as stdout:
sms_no_recipients = self.sms_no_recipients
numSent = sms_no_recipients.send()
self.assertEqual(numSent, 0)
self.assertEqual(stdout.getvalue(), "")
def test_send_sms(self):
with captured_stdout() as stdout:
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 0)
output = stdout.getvalue()
self.assertTrue("Subject: None" in output)
self.assertTrue("From: +15005550006" in output)
self.assertTrue("To: +15005550006" in output)
self.assertTrue("Body" in output)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
with captured_stdout() as stdout:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
numSent = send_mass_sms(datatuple)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 0)
output = stdout.getvalue()
self.assertTrue("Subject: None" in output)
self.assertTrue("From: +15005550006" in output)
self.assertTrue("To: +15005550006" in output)
self.assertTrue("Body" in output)
@override_settings(SMS_BACKEND='smsish.sms.backends.twilio.SMSBackend')
class SendSMSUsingTwilioTestCase(TestCase):
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_send(self):
sms = self.sms
numSent = sms.send()
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 0)
def test_send_sms(self):
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 0)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
from smsish.sms import get_sms_connection
with get_sms_connection(settings.SMS_BACKEND) as connection:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
numSent = send_mass_sms(datatuple, connection=connection)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 0)
@override_settings(
SMS_BACKEND='smsish.sms.backends.filebased.SMSBackend',
EMAIL_FILE_PATH="outbox",)
class SendSMSUsingFilebasedTestCase(TestCase):
"""
TODO: Test that files are actually written: see https://github.com/django/django/blob/master/tests/mail/tests.py#L876.
"""
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_send(self):
sms = self.sms
numSent = sms.send()
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 0)
def test_send_sms(self):
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 0)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
from smsish.sms import get_sms_connection
with get_sms_connection(settings.SMS_BACKEND) as connection:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
numSent = send_mass_sms(datatuple, connection=connection)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 0)
@override_settings(SMS_BACKEND='smsish.sms.backends.locmem.SMSBackend')
class SendSMSUsingLocmemTestCase(TestCase):
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_send(self):
sms = self.sms
numSent = sms.send()
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 1)
def test_send_sms(self):
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 1)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
from django.conf import settings
from smsish.sms import get_sms_connection
with get_sms_connection(settings.SMS_BACKEND) as connection:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
numSent = send_mass_sms(datatuple, connection=connection)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 10)
@override_settings(
SMS_BACKEND='smsish.sms.backends.mailtrap.SMSBackend',
SMSISH_MAILTRAP_SMS_BACKEND_EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend")
class SendSMSUsingMailtrapTestCase(TestCase):
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_send(self):
sms = self.sms
numSent = sms.send()
self.assertEqual(numSent, [1])
self.assertEqual(len(mail.outbox), 1)
def test_send_sms(self):
results = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(results, [1])
self.assertEqual(len(mail.outbox), 1)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
results = sms.send()
self.assertEqual(results, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
from django.conf import settings
from smsish.sms import get_sms_connection
with get_sms_connection(settings.SMS_BACKEND) as connection:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
results = send_mass_sms(datatuple, connection=connection)
self.assertEqual(sum(results), 10)
self.assertEqual(len(mail.outbox), 10)
@override_settings(SMS_BACKEND='smsish.sms.backends.rq.SMSBackend')
@override_settings(SMSISH_RQ_SMS_BACKEND='smsish.sms.backends.locmem.SMSBackend')
@override_settings(TESTING=True)
class SendSMSUsingRQTestCase(TestCase):
def setUp(self):
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def get_new_sms_message(self):
return SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
def test_send(self):
with self.assertRaises(AssertionError):
sms = self.get_new_sms_message()
numSent = sms.send()
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 1)
def process_jobs(self):
from django_rq import get_worker
get_worker().work(burst=True)
def test_send_with_connection(self):
# from django.conf import settings
from smsish.sms import get_sms_connection
sms = self.get_new_sms_message()
with get_sms_connection() as connection:
jobs = connection.send_messages([sms])
self.assertEqual(len(jobs), 1)
# http://python-rq.org/docs/testing/
# https://github.com/ui/django-rq#testing-tip
self.assertEqual(len(mail.outbox), 1)
self.process_jobs()
for job in jobs:
self.assertTrue(job.id)
self.assertEqual(job.args[0].body, sms.body)
def test_send_sms(self):
with self.assertRaises(AssertionError):
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
self.assertEqual(len(mail.outbox), 1)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
from django.conf import settings
from smsish.sms import get_sms_connection
with self.assertRaises(NotImplementedError):
with get_sms_connection(settings.SMS_BACKEND) as connection:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
numSent = send_mass_sms(datatuple, connection=connection)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 0)
TEST_SMTP_BACKENDS = False
if TEST_SMTP_BACKENDS:
@override_settings(
SMS_BACKEND='smsish.sms.backends.smtp.SMSBackend',
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=1025,
EMAIL_HOST_USER="",
EMAIL_HOST_PASSWORD="",
EMAIL_USE_TLS=False,)
class SendSMSUsingSMTPTestCase(TestCase):
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_send(self):
sms = self.sms
numSent = sms.send()
self.assertEqual(numSent, 1)
def test_send_sms(self):
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
with captured_stdout() as stdout:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
numSent = send_mass_sms(datatuple)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 0)
output = stdout.getvalue()
self.assertTrue("Subject: None" in output)
self.assertTrue("From: +15005550006" in output)
self.assertTrue("To: +15005550006" in output)
self.assertTrue("Body" in output)
@override_settings(SMS_BACKEND='smsish.sms.backends.mailcatcher.SMSBackend')
class SendSMSUsingMailCatcherTestCase(TestCase):
def setUp(self):
self.sms = SMSMessage(
"Body",
VALID_FROM_NUMBER,
[VALID_TO_NUMBER]
)
self.sms_no_recipients = SMSMessage("Body", VALID_TO_NUMBER, [])
def test_send(self):
sms = self.sms
numSent = sms.send()
self.assertEqual(numSent, 1)
def test_send_sms(self):
numSent = send_sms("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER])
self.assertEqual(numSent, 1)
def test_send_to_nobody(self):
sms = self.sms_no_recipients
numSent = sms.send()
self.assertEqual(numSent, 0)
self.assertEqual(len(mail.outbox), 0)
def test_send_mass_sms(self):
with captured_stdout() as stdout:
datatuple = (("Body", VALID_FROM_NUMBER, [VALID_TO_NUMBER]) for _ in range(10))
numSent = send_mass_sms(datatuple)
self.assertEqual(numSent, 10)
self.assertEqual(len(mail.outbox), 0)
output = stdout.getvalue()
self.assertTrue("Subject: None" in output)
self.assertTrue("From: +15005550006" in output)
self.assertTrue("To: +15005550006" in output)
self.assertTrue("Body" in output)
|
{
"content_hash": "34a21bfb80ad51d109d58f3e98c206a7",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 119,
"avg_line_length": 31.139423076923077,
"alnum_prop": 0.7096649683495445,
"repo_name": "RyanBalfanz/django-smsish",
"id": "64e3865eb023943de97beddc5dd941909e462c5d",
"size": "12954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smsish/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "284"
},
{
"name": "Python",
"bytes": "32226"
}
],
"symlink_target": ""
}
|
from .base import Logger
class Dynamic(Logger):
'''Dynamic logger allowing delayed computation of values.'''
def __getitem__(self, key):
'''Return value referenced by *key*.
If the value is a callable, then call it and return the result. In
addition store the computed result for future use.
'''
value = self._mapping[key]
        if callable(value):
self[key] = value = value()
return value
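# Illustrative usage sketch, not part of the original module: values stored as
# callables are computed on first access and the result is cached for later
# reads. The keyword-argument construction below is an assumption about the base
# Logger interface and may differ in practice.
#
#     log = Dynamic(timestamp=lambda: datetime.datetime.utcnow())
#     log['timestamp']   # computed here, on first access
#     log['timestamp']   # returns the cached value from the first call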
|
{
"content_hash": "e4a4d3b5a80c19e6504eef433ff93155",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 25.5,
"alnum_prop": 0.6294117647058823,
"repo_name": "4degrees/sawmill",
"id": "76d3bd4f95693f4d25cb06f4ee10663bffc31962",
"size": "615",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "source/sawmill/logger/dynamic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "73463"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import logging
import sqlparse
from past.builtins import basestring
import pandas as pd
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean,
DateTime,
)
import sqlalchemy as sa
from sqlalchemy import asc, and_, desc, select
from sqlalchemy.sql.expression import TextAsFrom
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql import table, literal_column, text, column
from flask import escape, Markup
from flask_appbuilder import Model
from flask_babel import lazy_gettext as _
from superset import db, utils, import_util, sm
from superset.connectors.base.models import BaseDatasource, BaseColumn, BaseMetric
from superset.utils import DTTM_ALIAS, QueryStatus
from superset.models.helpers import QueryResult
from superset.models.core import Database
from superset.jinja_context import get_template_processor
from superset.models.helpers import set_perm
class TableColumn(Model, BaseColumn):
"""ORM object for table columns, each table can have multiple columns"""
__tablename__ = 'table_columns'
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable',
backref=backref('columns', cascade='all, delete-orphan'),
foreign_keys=[table_id])
is_dttm = Column(Boolean, default=False)
expression = Column(Text, default='')
python_date_format = Column(String(255))
database_expression = Column(String(255))
export_fields = (
'table_id', 'column_name', 'verbose_name', 'is_dttm', 'is_active',
'type', 'groupby', 'count_distinct', 'sum', 'avg', 'max', 'min',
'filterable', 'expression', 'description', 'python_date_format',
'database_expression'
)
@property
def sqla_col(self):
name = self.column_name
if not self.expression:
col = column(self.column_name).label(name)
else:
col = literal_column(self.expression).label(name)
return col
def get_time_filter(self, start_dttm, end_dttm):
col = self.sqla_col.label('__time')
return and_(
col >= text(self.dttm_sql_literal(start_dttm)),
col <= text(self.dttm_sql_literal(end_dttm)),
)
def get_timestamp_expression(self, time_grain):
"""Getting the time component of the query"""
expr = self.expression or self.column_name
if not self.expression and not time_grain:
return column(expr, type_=DateTime).label(DTTM_ALIAS)
if time_grain:
pdf = self.python_date_format
if pdf in ('epoch_s', 'epoch_ms'):
# if epoch, translate to DATE using db specific conf
db_spec = self.table.database.db_engine_spec
if pdf == 'epoch_s':
expr = db_spec.epoch_to_dttm().format(col=expr)
elif pdf == 'epoch_ms':
expr = db_spec.epoch_ms_to_dttm().format(col=expr)
grain = self.table.database.grains_dict().get(time_grain, '{col}')
expr = grain.function.format(col=expr)
return literal_column(expr, type_=DateTime).label(DTTM_ALIAS)
@classmethod
def import_obj(cls, i_column):
def lookup_obj(lookup_column):
return db.session.query(TableColumn).filter(
TableColumn.table_id == lookup_column.table_id,
TableColumn.column_name == lookup_column.column_name).first()
return import_util.import_simple_obj(db.session, i_column, lookup_obj)
def dttm_sql_literal(self, dttm):
"""Convert datetime object to a SQL expression string
If database_expression is empty, the internal dttm
will be parsed as the string with the pattern that
the user inputted (python_date_format)
If database_expression is not empty, the internal dttm
will be parsed as the sql sentence for the database to convert
"""
tf = self.python_date_format or '%Y-%m-%d %H:%M:%S.%f'
if self.database_expression:
return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
elif tf == 'epoch_s':
return str((dttm - datetime(1970, 1, 1)).total_seconds())
elif tf == 'epoch_ms':
return str((dttm - datetime(1970, 1, 1)).total_seconds() * 1000.0)
else:
s = self.table.database.db_engine_spec.convert_dttm(
self.type or '', dttm)
return s or "'{}'".format(dttm.strftime(tf))
class SqlMetric(Model, BaseMetric):
"""ORM object for metrics, each table can have multiple metrics"""
__tablename__ = 'sql_metrics'
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable',
backref=backref('metrics', cascade='all, delete-orphan'),
foreign_keys=[table_id])
expression = Column(Text)
export_fields = (
'metric_name', 'verbose_name', 'metric_type', 'table_id', 'expression',
'description', 'is_restricted', 'd3format')
@property
def sqla_col(self):
name = self.metric_name
return literal_column(self.expression).label(name)
@property
def perm(self):
return (
"{parent_name}.[{obj.metric_name}](id:{obj.id})"
).format(obj=self,
parent_name=self.table.full_name) if self.table else None
@classmethod
def import_obj(cls, i_metric):
def lookup_obj(lookup_metric):
return db.session.query(SqlMetric).filter(
SqlMetric.table_id == lookup_metric.table_id,
SqlMetric.metric_name == lookup_metric.metric_name).first()
return import_util.import_simple_obj(db.session, i_metric, lookup_obj)
class SqlaTable(Model, BaseDatasource):
"""An ORM object for SqlAlchemy table references"""
type = "table"
query_language = 'sql'
metric_class = SqlMetric
column_class = TableColumn
__tablename__ = 'tables'
table_name = Column(String(250))
main_dttm_col = Column(String(250))
database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
fetch_values_predicate = Column(String(1000))
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship(
sm.user_model,
backref='tables',
foreign_keys=[user_id])
database = relationship(
'Database',
backref=backref('tables', cascade='all, delete-orphan'),
foreign_keys=[database_id])
schema = Column(String(255))
sql = Column(Text)
baselink = "tablemodelview"
export_fields = (
'table_name', 'main_dttm_col', 'description', 'default_endpoint',
'database_id', 'offset', 'cache_timeout', 'schema',
'sql', 'params')
__table_args__ = (
sa.UniqueConstraint(
'database_id', 'schema', 'table_name',
name='_customer_location_uc'),)
def __repr__(self):
return self.name
@property
def connection(self):
return str(self.database)
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def link(self):
name = escape(self.name)
return Markup(
'<a href="{self.explore_url}">{name}</a>'.format(**locals()))
@property
def schema_perm(self):
"""Returns schema permission if present, database one otherwise."""
return utils.get_schema_perm(self.database, self.schema)
def get_perm(self):
return (
"[{obj.database}].[{obj.table_name}]"
"(id:{obj.id})").format(obj=self)
@property
def name(self):
if not self.schema:
return self.table_name
return "{}.{}".format(self.schema, self.table_name)
@property
def full_name(self):
return utils.get_datasource_full_name(
self.database, self.table_name, schema=self.schema)
@property
def dttm_cols(self):
l = [c.column_name for c in self.columns if c.is_dttm]
if self.main_dttm_col and self.main_dttm_col not in l:
l.append(self.main_dttm_col)
return l
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.is_num]
@property
def any_dttm_col(self):
cols = self.dttm_cols
if cols:
return cols[0]
@property
def html(self):
t = ((c.column_name, c.type) for c in self.columns)
df = pd.DataFrame(t)
df.columns = ['field', 'type']
return df.to_html(
index=False,
classes=(
"dataframe table table-striped table-bordered "
"table-condensed"))
@property
def sql_url(self):
return self.database.sql_url + "?table_name=" + str(self.table_name)
@property
def time_column_grains(self):
return {
"time_columns": self.dttm_cols,
"time_grains": [grain.name for grain in self.database.grains()]
}
def get_col(self, col_name):
columns = self.columns
for col in columns:
if col_name == col.column_name:
return col
@property
def data(self):
d = super(SqlaTable, self).data
if self.type == 'table':
grains = self.database.grains() or []
if grains:
grains = [(g.name, g.name) for g in grains]
d['granularity_sqla'] = utils.choicify(self.dttm_cols)
d['time_grain_sqla'] = grains
return d
def values_for_column(self, column_name, limit=10000):
"""Runs query against sqla to retrieve some
sample values for the given column.
"""
cols = {col.column_name: col for col in self.columns}
target_col = cols[column_name]
tp = self.get_template_processor()
db_engine_spec = self.database.db_engine_spec
qry = (
select([target_col.sqla_col])
.select_from(self.get_from_clause(tp, db_engine_spec))
.distinct(column_name)
)
if limit:
qry = qry.limit(limit)
if self.fetch_values_predicate:
tp = self.get_template_processor()
qry = qry.where(tp.process_template(self.fetch_values_predicate))
engine = self.database.get_sqla_engine()
sql = "{}".format(
qry.compile(
engine, compile_kwargs={"literal_binds": True}, ),
)
df = pd.read_sql_query(sql=sql, con=engine)
return [row[0] for row in df.to_records(index=False)]
def get_template_processor(self, **kwargs):
return get_template_processor(
table=self, database=self.database, **kwargs)
def get_query_str(self, query_obj):
engine = self.database.get_sqla_engine()
qry = self.get_sqla_query(**query_obj)
sql = str(
qry.compile(
engine,
compile_kwargs={"literal_binds": True}
)
)
logging.info(sql)
sql = sqlparse.format(sql, reindent=True)
return sql
def get_sqla_table(self):
tbl = table(self.table_name)
if self.schema:
tbl.schema = self.schema
return tbl
def get_from_clause(self, template_processor=None, db_engine_spec=None):
# Supporting arbitrary SQL statements in place of tables
if self.sql:
from_sql = self.sql
if template_processor:
from_sql = template_processor.process_template(from_sql)
if db_engine_spec:
from_sql = db_engine_spec.escape_sql(from_sql)
return TextAsFrom(sa.text(from_sql), []).alias('expr_qry')
return self.get_sqla_table()
def get_sqla_query( # sqla
self,
groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=15,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None,
inner_to_dttm=None,
orderby=None,
extras=None,
columns=None,
form_data=None):
"""Querying any sqla table from this common interface"""
template_kwargs = {
'from_dttm': from_dttm,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'to_dttm': to_dttm,
'form_data': form_data,
}
template_processor = self.get_template_processor(**template_kwargs)
db_engine_spec = self.database.db_engine_spec
# For backward compatibility
if granularity not in self.dttm_cols:
granularity = self.main_dttm_col
# Database spec supports join-free timeslot grouping
time_groupby_inline = db_engine_spec.time_groupby_inline
cols = {col.column_name: col for col in self.columns}
metrics_dict = {m.metric_name: m for m in self.metrics}
if not granularity and is_timeseries:
raise Exception(_(
"Datetime column not provided as part table configuration "
"and is required by this type of chart"))
for m in metrics:
if m not in metrics_dict:
raise Exception(_("Metric '{}' is not valid".format(m)))
metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]
timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)
timeseries_limit_metric_expr = None
if timeseries_limit_metric:
timeseries_limit_metric_expr = \
timeseries_limit_metric.sqla_col
if metrics_exprs:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr = literal_column("COUNT(*)").label("ccount")
select_exprs = []
groupby_exprs = []
if groupby:
select_exprs = []
inner_select_exprs = []
inner_groupby_exprs = []
for s in groupby:
col = cols[s]
outer = col.sqla_col
inner = col.sqla_col.label(col.column_name + '__')
groupby_exprs.append(outer)
select_exprs.append(outer)
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
elif columns:
for s in columns:
select_exprs.append(cols[s].sqla_col)
metrics_exprs = []
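        # When a time column is involved, add the time-bucket expression to the
        # select/group by (for timeseries charts) and collect time-range filters.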
if granularity:
dttm_col = cols[granularity]
time_grain = extras.get('time_grain_sqla')
time_filters = []
if is_timeseries:
timestamp = dttm_col.get_timestamp_expression(time_grain)
select_exprs += [timestamp]
groupby_exprs += [timestamp]
# Use main dttm column to support index with secondary dttm columns
if db_engine_spec.time_secondary_columns and \
self.main_dttm_col in self.dttm_cols and \
self.main_dttm_col != dttm_col.column_name:
time_filters.append(cols[self.main_dttm_col].
get_time_filter(from_dttm, to_dttm))
time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))
select_exprs += metrics_exprs
qry = sa.select(select_exprs)
tbl = self.get_from_clause(template_processor, db_engine_spec)
if not columns:
qry = qry.group_by(*groupby_exprs)
where_clause_and = []
having_clause_and = []
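        # Translate the filter dicts ({'col', 'op', 'val'}) from the query
        # object into SQLAlchemy where-clause conditions.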
for flt in filter:
if not all([flt.get(s) for s in ['col', 'op', 'val']]):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
col_obj = cols.get(col)
if col_obj:
if op in ('in', 'not in'):
values = []
for v in eq:
# For backwards compatibility and edge cases
# where a column data type might have changed
if isinstance(v, basestring):
v = v.strip("'").strip('"')
if col_obj.is_num:
v = utils.string_to_num(v)
# Removing empty strings and non numeric values
# targeting numeric columns
if v is not None:
values.append(v)
cond = col_obj.sqla_col.in_(values)
if op == 'not in':
cond = ~cond
where_clause_and.append(cond)
else:
if col_obj.is_num:
eq = utils.string_to_num(flt['val'])
if op == '==':
where_clause_and.append(col_obj.sqla_col == eq)
elif op == '!=':
where_clause_and.append(col_obj.sqla_col != eq)
elif op == '>':
where_clause_and.append(col_obj.sqla_col > eq)
elif op == '<':
where_clause_and.append(col_obj.sqla_col < eq)
elif op == '>=':
where_clause_and.append(col_obj.sqla_col >= eq)
elif op == '<=':
where_clause_and.append(col_obj.sqla_col <= eq)
elif op == 'LIKE':
where_clause_and.append(col_obj.sqla_col.like(eq))
if extras:
where = extras.get('where')
if where:
where = template_processor.process_template(where)
where_clause_and += [sa.text('({})'.format(where))]
having = extras.get('having')
if having:
having = template_processor.process_template(having)
having_clause_and += [sa.text('({})'.format(having))]
if granularity:
qry = qry.where(and_(*(time_filters + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
if groupby:
qry = qry.order_by(desc(main_metric_expr))
elif orderby:
for col, ascending in orderby:
direction = asc if ascending else desc
qry = qry.order_by(direction(col))
if row_limit:
qry = qry.limit(row_limit)
if is_timeseries and \
timeseries_limit and groupby and not time_groupby_inline:
            # Some SQL dialects require ORDER BY expressions to also appear in
            # the SELECT clause; others (e.g. Vertica) require a unique inner
            # alias.
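            # Build an aggregated subquery over the inner time range, keep only
            # the top `timeseries_limit` groups (ordered by the main or limit
            # metric), then join it back so the outer query returns only those
            # series.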
inner_main_metric_expr = main_metric_expr.label('mme_inner__')
inner_select_exprs += [inner_main_metric_expr]
subq = select(inner_select_exprs)
subq = subq.select_from(tbl)
inner_time_filter = dttm_col.get_time_filter(
inner_from_dttm or from_dttm,
inner_to_dttm or to_dttm,
)
subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
subq = subq.group_by(*inner_groupby_exprs)
ob = inner_main_metric_expr
if timeseries_limit_metric_expr is not None:
ob = timeseries_limit_metric_expr
subq = subq.order_by(desc(ob))
subq = subq.limit(timeseries_limit)
on_clause = []
for i, gb in enumerate(groupby):
on_clause.append(
groupby_exprs[i] == column(gb + '__'))
tbl = tbl.join(subq.alias(), and_(*on_clause))
return qry.select_from(tbl)
def query(self, query_obj):
qry_start_dttm = datetime.now()
sql = self.get_query_str(query_obj)
status = QueryStatus.SUCCESS
error_message = None
df = None
try:
df = self.database.get_df(sql, self.schema)
except Exception as e:
status = QueryStatus.FAILED
logging.exception(e)
error_message = (
self.database.db_engine_spec.extract_error_message(e))
return QueryResult(
status=status,
df=df,
duration=datetime.now() - qry_start_dttm,
query=sql,
error_message=error_message)
def get_sqla_table_object(self):
return self.database.get_table(self.table_name, schema=self.schema)
def fetch_metadata(self):
"""Fetches the metadata for the table and merges it in"""
try:
table = self.get_sqla_table_object()
except Exception:
raise Exception(_(
"Table doesn't seem to exist in the specified database, "
"couldn't fetch column information"))
TC = TableColumn # noqa shortcut to class
M = SqlMetric # noqa
metrics = []
any_date_col = None
db_dialect = self.database.get_sqla_engine().dialect
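        # Walk the physical table's columns, creating or updating TableColumn
        # records and auto-generating aggregate metrics (sum/avg/min/max/
        # count distinct) based on the column flags.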
for col in table.columns:
try:
datatype = "{}".format(col.type).upper()
except Exception as e:
datatype = "UNKNOWN"
logging.error(
"Unrecognized data type in {}.{}".format(table, col.name))
logging.exception(e)
dbcol = (
db.session
.query(TC)
.filter(TC.table == self)
.filter(TC.column_name == col.name)
.first()
)
db.session.flush()
if not dbcol:
dbcol = TableColumn(column_name=col.name, type=datatype)
dbcol.groupby = dbcol.is_string
dbcol.filterable = dbcol.is_string
dbcol.sum = dbcol.is_num
dbcol.avg = dbcol.is_num
dbcol.is_dttm = dbcol.is_time
db.session.merge(self)
self.columns.append(dbcol)
if not any_date_col and dbcol.is_time:
any_date_col = col.name
quoted = "{}".format(col.compile(dialect=db_dialect))
if dbcol.sum:
metrics.append(M(
metric_name='sum__' + dbcol.column_name,
verbose_name='sum__' + dbcol.column_name,
metric_type='sum',
expression="SUM({})".format(quoted)
))
if dbcol.avg:
metrics.append(M(
metric_name='avg__' + dbcol.column_name,
verbose_name='avg__' + dbcol.column_name,
metric_type='avg',
expression="AVG({})".format(quoted)
))
if dbcol.max:
metrics.append(M(
metric_name='max__' + dbcol.column_name,
verbose_name='max__' + dbcol.column_name,
metric_type='max',
expression="MAX({})".format(quoted)
))
if dbcol.min:
metrics.append(M(
metric_name='min__' + dbcol.column_name,
verbose_name='min__' + dbcol.column_name,
metric_type='min',
expression="MIN({})".format(quoted)
))
if dbcol.count_distinct:
metrics.append(M(
metric_name='count_distinct__' + dbcol.column_name,
verbose_name='count_distinct__' + dbcol.column_name,
metric_type='count_distinct',
expression="COUNT(DISTINCT {})".format(quoted)
))
dbcol.type = datatype
db.session.merge(self)
db.session.commit()
metrics.append(M(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
expression="COUNT(*)"
))
for metric in metrics:
m = (
db.session.query(M)
.filter(M.metric_name == metric.metric_name)
.filter(M.table_id == self.id)
.first()
)
metric.table_id = self.id
if not m:
db.session.add(metric)
db.session.commit()
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
@classmethod
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
        Metrics, columns and the datasource will be overridden if they already
        exist. This function can be used to import/export dashboards between
        multiple superset instances. Audit metadata isn't copied over.
"""
def lookup_sqlatable(table):
return db.session.query(SqlaTable).join(Database).filter(
SqlaTable.table_name == table.table_name,
SqlaTable.schema == table.schema,
Database.id == table.database_id,
).first()
def lookup_database(table):
return db.session.query(Database).filter_by(
database_name=table.params_dict['database_name']).one()
return import_util.import_datasource(
db.session, i_datasource, lookup_database, lookup_sqlatable,
import_time)
@classmethod
def query_datasources_by_name(
cls, session, database, datasource_name, schema=None):
query = (
session.query(cls)
.filter_by(database_id=database.id)
.filter_by(table_name=datasource_name)
)
if schema:
query = query.filter_by(schema=schema)
return query.all()
sa.event.listen(SqlaTable, 'after_insert', set_perm)
sa.event.listen(SqlaTable, 'after_update', set_perm)
|
{
"content_hash": "3f13bcf84901ef9cd8998f5dd35d32bd",
"timestamp": "",
"source": "github",
"line_count": 715,
"max_line_length": 86,
"avg_line_length": 36.546853146853145,
"alnum_prop": 0.5465156327733344,
"repo_name": "lina9527/easybi",
"id": "6dccdf874ad1ce17c9d38134b364aa7b49da3765",
"size": "26131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "connectors/sqla/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76785"
},
{
"name": "HTML",
"bytes": "3048686"
},
{
"name": "JavaScript",
"bytes": "635778"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "566451"
},
{
"name": "Shell",
"bytes": "326"
}
],
"symlink_target": ""
}
|
from os.path import exists
from setuptools import setup
setup(name='strategies',
version='0.2.1',
description='Strategic Programming in python',
url='http://github.com/logpy/strategies',
author='Matthew Rocklin',
author_email='mrocklin@gmail.com',
install_requires=open('requirements.txt').read().split('\n'),
license='BSD',
packages=['strategies', 'strategies.branch'],
long_description=open('README.md').read() if exists("README.md") else "",
zip_safe=False)
|
{
"content_hash": "58f9cb8bbedf15de4d4deab97910b838",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6590038314176245,
"repo_name": "mrocklin/strategies",
"id": "992044d5039d474b6a142d30f05442e271b37ea1",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19302"
}
],
"symlink_target": ""
}
|
import unittest
import re
import sys
import os
import glob
import helper
from mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
def __init__(self, stream):
unittest.TestResult.__init__(self)
self.stream = stream
self.prevTestClassName = None
self.tests = path_to_enlightenment.koans()
self.pass_count = 0
self.lesson_pass_count = 0
self.all_lessons = None
def startTest(self, test):
MockableTestResult.startTest(self, test)
if helper.cls_name(test) != self.prevTestClassName:
self.prevTestClassName = helper.cls_name(test)
if not self.failures:
self.stream.writeln()
self.stream.writeln("{0}{1}Thinking {2}".format(
Fore.RESET, Style.NORMAL, helper.cls_name(test)))
if helper.cls_name(test) != 'AboutAsserts':
self.lesson_pass_count += 1
def addSuccess(self, test):
if self.passesCount():
MockableTestResult.addSuccess(self, test)
self.stream.writeln( \
" {0}{1}{2} has expanded your awareness.{3}{4}" \
.format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
Fore.RESET, Style.NORMAL))
self.pass_count += 1
def addError(self, test, err):
# Having 1 list for errors and 1 list for failures would mess with
# the error sequence
self.addFailure(test, err)
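    # passesCount(): successes only count while there is no failure yet, or
    # while the first recorded failure belongs to the test class currently
    # being run.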
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) !=
self.prevTestClassName)
def addFailure(self, test, err):
MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
def learn(self):
self.errorReport()
self.stream.writeln("")
self.stream.writeln("")
self.stream.writeln(self.report_progress())
self.stream.writeln("")
self.stream.writeln(self.say_something_zenlike())
if self.failures: sys.exit(-1)
self.stream.writeln(
"\n{0}**************************************************" \
.format(Fore.RESET))
self.stream.writeln("\n{0}That was the last one, well done!" \
.format(Fore.MAGENTA))
self.stream.writeln(
"\nIf you want more, take a look at about_extra_credit_task.py")
def errorReport(self):
problem = self.firstFailure()
if not problem: return
test, err = problem
self.stream.writeln(" {0}{1}{2} has damaged your "
"karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
Style.BRIGHT, self.scrapeAssertionError(err)))
self.stream.writeln("")
self.stream.writeln("{0}{1}Please meditate on the following code:" \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
if not err: return ""
error_text = ""
count = 0
for line in err.splitlines():
m = re.search("^[^^ ].*$",line)
if m and m.group(0):
count+=1
if count>1:
error_text += (" " + line.strip()).rstrip() + '\n'
return error_text.strip('\n')
def scrapeInterestingStackDump(self, err):
if not err:
return ""
lines = err.splitlines()
sep = '@@@@@SEP@@@@@'
scrape = ""
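        # First pass: keep "File ..." frame lines and the source lines under
        # them (joined by a sentinel); the second pass keeps only frames from
        # the koans directory.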
for line in lines:
m = re.search("^ File .*$",line)
if m and m.group(0):
scrape += '\n' + line
m = re.search("^ \w(\w)+.*$",line)
if m and m.group(0):
scrape += sep + line
lines = scrape.splitlines()
scrape = ""
for line in lines:
m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
if m and m.group(0):
scrape += line + '\n'
return scrape.replace(sep, '\n').strip('\n')
def report_progress(self):
koans_complete = self.pass_count
lessons_complete = self.lesson_pass_count
koans_remaining = self.total_koans() - koans_complete
lessons_remaining = self.total_lessons() - lessons_complete
sent1 = "You have completed {0} koans and " \
"{1} lessons.\n".format(koans_complete, lessons_complete)
sent2 = "You are now {0} koans and {1} lessons away from " \
"reaching enlightenment.".format(koans_remaining, lessons_remaining)
return sent1+sent2
    # Hat tip to Tim Peters for the zen statements from 'The Zen of Python'
    # (http://www.python.org/dev/peps/pep-0020/)
    #
    # Also a hat tip to Ara T. Howard for the zen statements from his
    # metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
    # EdgeCase's later permutation in the Ruby Koans
def say_something_zenlike(self):
if self.failures:
turn = self.pass_count % 37
zenness = "";
if turn == 0:
zenness = "Beautiful is better than ugly."
elif turn == 1 or turn == 2:
zenness = "Explicit is better than implicit."
elif turn == 3 or turn == 4:
zenness = "Simple is better than complex."
elif turn == 5 or turn == 6:
zenness = "Complex is better than complicated."
elif turn == 7 or turn == 8:
zenness = "Flat is better than nested."
elif turn == 9 or turn == 10:
zenness = "Sparse is better than dense."
elif turn == 11 or turn == 12:
zenness = "Readability counts."
elif turn == 13 or turn == 14:
zenness = "Special cases aren't special enough to " \
"break the rules."
elif turn == 15 or turn == 16:
zenness = "Although practicality beats purity."
elif turn == 17 or turn == 18:
zenness = "Errors should never pass silently."
elif turn == 19 or turn == 20:
zenness = "Unless explicitly silenced."
elif turn == 21 or turn == 22:
zenness = "In the face of ambiguity, refuse the " \
"temptation to guess."
elif turn == 23 or turn == 24:
zenness = "There should be one-- and preferably only " \
"one --obvious way to do it."
elif turn == 25 or turn == 26:
zenness = "Although that way may not be obvious at " \
"first unless you're Dutch."
elif turn == 27 or turn == 28:
zenness = "Now is better than never."
elif turn == 29 or turn == 30:
zenness = "Although never is often better than right " \
"now."
elif turn == 31 or turn == 32:
zenness = "If the implementation is hard to explain, " \
"it's a bad idea."
elif turn == 33 or turn == 34:
zenness = "If the implementation is easy to explain, " \
"it may be a good idea."
else:
zenness = "Namespaces are one honking great idea -- " \
"let's do more of those!"
return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
else:
return "{0}Nobody ever expects the Spanish Inquisition." \
.format(Fore.CYAN)
# Hopefully this will never ever happen!
return "The temple is collapsing! Run!!!"
def total_lessons(self):
all_lessons = self.filter_all_lessons()
if all_lessons:
return len(all_lessons)
else:
return 0
def total_koans(self):
return self.tests.countTestCases()
def filter_all_lessons(self):
cur_dir = os.path.split(os.path.realpath(__file__))[0]
if not self.all_lessons:
self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
self.all_lessons = filter(lambda filename:
"about_extra_credit" not in filename,
self.all_lessons)
return self.all_lessons
|
{
"content_hash": "005180c41346bba3c76197cd0ef754d3",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 87,
"avg_line_length": 37.523809523809526,
"alnum_prop": 0.5310913705583756,
"repo_name": "aishraj/pykons_solution",
"id": "fb93e817b5979c4599d6e1c80068cd223acd9b43",
"size": "9503",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python2/runner/sensei.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316242"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
}
|
from PyQt5.QtCore import pyqtSignal, QTextStream, Qt
from PyQt5.QtWidgets import QApplication
from PyQt5.QtNetwork import QLocalSocket, QLocalServer
class QtSingleApplication(QApplication):
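    """QApplication that allows only a single running instance per id: the
    first instance listens on a QLocalServer, later instances detect it over
    a QLocalSocket and forward their messages to it."""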
messageReceived = pyqtSignal(str)
def __init__(self, id, *argv):
super(QtSingleApplication, self).__init__(*argv)
self._id = id
self._activationWindow = None
self._activateOnMessage = False
# Is there another instance running?
self._outSocket = QLocalSocket()
self._outSocket.connectToServer(self._id)
self._isRunning = self._outSocket.waitForConnected()
if self._isRunning:
# Yes, there is.
self._outStream = QTextStream(self._outSocket)
self._outStream.setCodec('UTF-8')
else:
# No, there isn't.
self._outSocket = None
self._outStream = None
self._inSocket = None
self._inStream = None
self._server = QLocalServer()
self._server.listen(self._id)
self._server.newConnection.connect(self._onNewConnection)
def isRunning(self):
return self._isRunning
def id(self):
return self._id
def activationWindow(self):
return self._activationWindow
def setActivationWindow(self, activationWindow, activateOnMessage = True):
self._activationWindow = activationWindow
self._activateOnMessage = activateOnMessage
def activateWindow(self):
if not self._activationWindow:
return
self._activationWindow.show()
self._activationWindow.setWindowState(
self._activationWindow.windowState() & ~Qt.WindowMinimized)
self._activationWindow.raise_()
self._activationWindow.activateWindow()
def sendMessage(self, msg):
if not self._outStream:
return False
self._outStream << msg << '\n'
self._outStream.flush()
return self._outSocket.waitForBytesWritten()
def _onNewConnection(self):
if self._inSocket:
self._inSocket.readyRead.disconnect(self._onReadyRead)
self._inSocket = self._server.nextPendingConnection()
if not self._inSocket:
return
self._inStream = QTextStream(self._inSocket)
self._inStream.setCodec('UTF-8')
self._inSocket.readyRead.connect(self._onReadyRead)
if self._activateOnMessage:
self.activateWindow()
def _onReadyRead(self):
while True:
msg = self._inStream.readLine()
if not msg: break
self.messageReceived.emit(msg)
|
{
"content_hash": "0ffb2e86801bf7ba5b1e442985fe292c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 78,
"avg_line_length": 32.36585365853659,
"alnum_prop": 0.6239638281838734,
"repo_name": "narthollis/eve-mcl",
"id": "51aa596f864c89709aae0256c25df9483cf0c2f5",
"size": "2741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcl/gui/qtsingleapplication.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Inno Setup",
"bytes": "1839"
},
{
"name": "Python",
"bytes": "2411117"
}
],
"symlink_target": ""
}
|
"""
Adds support for Nest thermostats.
"""
import logging
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD, TEMP_CELCIUS)
REQUIREMENTS = ['python-nest==2.4.0']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the nest thermostat. """
logger = logging.getLogger(__name__)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
if username is None or password is None:
logger.error("Missing required configuration items %s or %s",
CONF_USERNAME, CONF_PASSWORD)
return
try:
import nest
except ImportError:
logger.exception(
"Error while importing dependency nest. "
"Did you maybe not install the python-nest dependency?")
return
napi = nest.Nest(username, password)
add_devices([
NestThermostat(structure, device)
for structure in napi.structures
for device in structure.devices
])
class NestThermostat(ThermostatDevice):
""" Represents a Nest thermostat within Home Assistant. """
def __init__(self, structure, device):
self.structure = structure
self.device = device
@property
def name(self):
""" Returns the name of the nest, if any. """
location = self.device.where
name = self.device.name
if location is None:
return name
else:
if name == '':
return location.capitalize()
else:
return location.capitalize() + '(' + name + ')'
@property
def unit_of_measurement(self):
""" Unit of measurement this thermostat expresses itself in. """
return TEMP_CELCIUS
@property
def device_state_attributes(self):
""" Returns device specific state attributes. """
# Move these to Thermostat Device and make them global
return {
"humidity": self.device.humidity,
"target_humidity": self.device.target_humidity,
"fan": self.device.fan,
"mode": self.device.mode
}
@property
def current_temperature(self):
""" Returns the current temperature. """
return round(self.device.temperature, 1)
@property
def target_temperature(self):
""" Returns the temperature we try to reach. """
target = self.device.target
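        # In range mode the target is a (low, high) tuple; report the nearer
        # bound when the current temperature is outside the range, otherwise
        # the midpoint.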
if isinstance(target, tuple):
low, high = target
if self.current_temperature < low:
temp = low
elif self.current_temperature > high:
temp = high
else:
temp = (low + high)/2
else:
temp = target
return round(temp, 1)
@property
def is_away_mode_on(self):
""" Returns if away mode is on. """
return self.structure.away
def set_temperature(self, temperature):
""" Set new target temperature """
self.device.target = temperature
def turn_away_mode_on(self):
""" Turns away on. """
self.structure.away = True
def turn_away_mode_off(self):
""" Turns away off. """
self.structure.away = False
@property
def min_temp(self):
""" Identifies min_temp in Nest API or defaults if not available. """
temp = self.device.away_temperature.low
if temp is None:
return super().min_temp
else:
return temp
@property
def max_temp(self):
""" Identifies mxn_temp in Nest API or defaults if not available. """
temp = self.device.away_temperature.high
if temp is None:
return super().max_temp
else:
return temp
def update(self):
""" Python-nest has its own mechanism for staying up to date. """
pass
|
{
"content_hash": "00d0bf679ebef75178df785260606ad1",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 77,
"avg_line_length": 28.3,
"alnum_prop": 0.5888440181726401,
"repo_name": "EricRho/home-assistant",
"id": "1de729b590d93b51501cd2e08883af34034af500",
"size": "3962",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/thermostat/nest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "813051"
},
{
"name": "Python",
"bytes": "819434"
},
{
"name": "Shell",
"bytes": "2631"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-trafficmanager
# USAGE
python endpoint_get_external_with_geo_mapping.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = TrafficManagerManagementClient(
credential=DefaultAzureCredential(),
subscription_id="{subscription-id}",
)
response = client.endpoints.get(
resource_group_name="azuresdkfornetautoresttrafficmanager2191",
profile_name="azuresdkfornetautoresttrafficmanager8224",
endpoint_type="ExternalEndpoints",
endpoint_name="My%20external%20endpoint",
)
print(response)
# x-ms-original-file: specification/trafficmanager/resource-manager/Microsoft.Network/preview/2022-04-01-preview/examples/Endpoint-GET-External-WithGeoMapping.json
if __name__ == "__main__":
main()
|
{
"content_hash": "800b055ade65fb239304e607a04e46a7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 163,
"avg_line_length": 36.94285714285714,
"alnum_prop": 0.748646558391338,
"repo_name": "Azure/azure-sdk-for-python",
"id": "57c927263d592256ab75ad32f94ac4a860a727e4",
"size": "1761",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/trafficmanager/azure-mgmt-trafficmanager/generated_samples/endpoint_get_external_with_geo_mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from poetry.core.spdx.helpers import license_by_id
def test_classifier_name() -> None:
license = license_by_id("lgpl-3.0-or-later")
assert (
license.classifier_name
== "GNU Lesser General Public License v3 or later (LGPLv3+)"
)
def test_classifier_name_no_classifer_osi_approved() -> None:
license = license_by_id("LiLiQ-R-1.1")
assert license.classifier_name is None
def test_classifier_name_no_classifer() -> None:
license = license_by_id("Leptonica")
assert license.classifier_name == "Other/Proprietary License"
def test_classifier() -> None:
license = license_by_id("lgpl-3.0-or-later")
assert (
license.classifier
== "License :: "
"OSI Approved :: "
"GNU Lesser General Public License v3 or later (LGPLv3+)"
)
def test_classifier_no_classifer_osi_approved() -> None:
license = license_by_id("LiLiQ-R-1.1")
assert license.classifier == "License :: OSI Approved"
def test_classifier_no_classifer() -> None:
license = license_by_id("Leptonica")
assert license.classifier == "License :: Other/Proprietary License"
def test_proprietary_license() -> None:
license = license_by_id("Proprietary")
assert license.classifier == "License :: Other/Proprietary License"
def test_custom_license() -> None:
license = license_by_id("Amazon Software License")
assert license.classifier == "License :: Other/Proprietary License"
|
{
"content_hash": "69a78ffc121a1270cd8afe6a4079ebf4",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 71,
"avg_line_length": 25.372881355932204,
"alnum_prop": 0.6686706746826987,
"repo_name": "python-poetry/poetry-core",
"id": "6cada0735db9dac55acb94093aad386ff3e6ab4b",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/spdx/test_license.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2664"
},
{
"name": "Makefile",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2084191"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
ALIASES = {
'esri/basemaps': 'esriBasemaps',
'esri/config': 'esriConfig',
'esri/graphic': 'Graphic',
'esri/IdentityManager': 'esriId',
'esri/kernel': 'esriNS',
'esri/lang': 'esriLang',
'esri/map': 'Map',
'esri/request': 'esriRequest',
'esri/undoManager': 'UndoManager',
'esri/arcgis/Portal': 'arcgisPortal',
'esri/arcgis/utils': 'arcgisUtils',
'esri/geometry/jsonUtils': 'geometryJsonUtils',
'esri/layers/layer': 'Layer',
'esri/renderers/jsonUtils': 'rendererJsonUtils',
'esri/styles/basic': 'esriStylesBasic',
'esri/styles/choropleth': 'esriStylesChoropleth',
'esri/styles/heatmap': 'esriStylesHeatmap',
'esri/styles/size': 'esriStylesSize',
'esri/styles/type': 'esriStylesType',
'esri/symbols/jsonUtils': 'symbolJsonUtils',
'esri/tasks/locator': 'Locator',
'esri/tasks/query': 'Query',
'esri/toolbars/draw': 'Draw',
'esri/toolbars/edit': 'Edit',
'esri/toolbars/navigation': 'Navigation',
'dojo/fx': 'coreFx',
'dojo/_base/fx': 'baseFx'
}
|
{
"content_hash": "c50fa1a32a9f96beacae12c8712e1a71",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 53,
"avg_line_length": 36.10344827586207,
"alnum_prop": 0.6418338108882522,
"repo_name": "agrc/AmdButler",
"id": "c04ad5e3053d6c2874b4d589ae780f7c655d55e7",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/preferred_argument_aliases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "93757"
},
{
"name": "Python",
"bytes": "27022"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self,
plotly_name="dtickrange",
parent_name="barpolar.marker.colorbar.tickformatstop",
**kwargs
):
super(DtickrangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
items=kwargs.pop(
"items",
[
{"valType": "any", "editType": "colorbars"},
{"valType": "any", "editType": "colorbars"},
],
),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "cdd663084f68af36a6255b45f8793152",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 31.875,
"alnum_prop": 0.5058823529411764,
"repo_name": "plotly/python-api",
"id": "3d500b8a8f117f4f3ec5adf764b88e2c102d2725",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/barpolar/marker/colorbar/tickformatstop/_dtickrange.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
'''
Imaging B and C configs together. Also using Arecibo data as the input model.
Optionally feather the Arecibo data and the combined VLA image together.
'''
import os
from tasks import *
from taskinit import *
import casac
# vis_path = 'VLA/archival/'
vis = "M33_b_c.ms"
out_root = 'M33_206_b_c'
# mod_path = 'Arecibo/'
model = 'M33_model.image'
mask = 'M33_newmask.image'
combine_configs = False
do_cvel = False
do_dirtyimage = False
do_clean_1chan = False
do_clean = True
do_export = True
if combine_configs:
print("Combining the reduced B and C configuration data.")
concat(vis=["../b_config/M33_bconfig_all.split.contsub",
"../c_config/M33.split.contsub"],
concatvis=vis,
timesort=True, freqtol='10MHz')
if do_cvel:
os.system('rm -rf '+out_root+'.cvel')
cvel(vis=vis, outputvis=out_root+'.cvel', mode='channel',
nchan=-1, start=0, width=1, restfreq='1420.40575177MHz',
outframe='LSRK', phasecenter='J2000 01h33m50.904 +30d39m35.79')
vis = out_root+'.cvel'
if do_dirtyimage:
# First creates a dirty cube to examine
print "Making dirty cube."
os.system('rm -rf '+out_root+'.dirty*')
clean(vis=vis, imagename=out_root+'.dirty_mask_model', restfreq='1420.40575177MHz',
mode='channel', width=1, nchan=205, start=10,
cell='1.5arcsec', multiscale=[0, 3, 9, 27, 200],
threshold='2.0mJy/beam', imagermode='mosaic',
imsize=[4096, 4096], weighting='natural', robust=0.0, niter=0,
pbcor=True, interpolation='linear', usescratch=True,
phasecenter='J2000 01h33m50.904 +30d39m35.79', veltype='radio',
outframe='LSRK', modelimage=model, mask=mask)
if do_clean_1chan:
# os.system('rm -rf '+out_root+'.chan*')
# For multiscale, 1 pixel = 3 arcsec in C, 1 pix = 1.5 arcsec in B.
model_110 = "../../../Arecibo/M33_model_channel_110.image"
mask_110 = "../../../Arecibo/M33_newmask_channel_110.image"
clean(vis=vis, imagename=out_root+'.chan_110', field='M33*',
restfreq='1420.40575177MHz', mode='channel', nterms=1,
width=1, nchan=1, start=110, cell='1.5arcsec',
imsize=[4096, 4096], weighting='natural', niter=50000,
threshold='2.2mJy/beam', imagermode='mosaic', gain=0.5,
multiscale=[0, 8, 20], interactive=False,
pbcor=True, interpolation='linear', usescratch=True,
phasecenter='J2000 01h33m50.904 +30d39m35.79', veltype='radio',
outframe='LSRK', modelimage=model_110, mask=mask_110)
model_65 = "../../../Arecibo/M33_model_channel_65.image"
mask_65 = "../../../Arecibo/M33_newmask_channel_65.image"
clean(vis=vis, imagename=out_root+'.chan_65', field='M33*',
restfreq='1420.40575177MHz', mode='channel', nterms=1,
width=1, nchan=1, start=65, cell='1.5arcsec',
imsize=[4096, 4096], weighting='natural', niter=50000,
threshold='2.2mJy/beam', imagermode='mosaic', gain=0.5,
multiscale=[0, 8, 20], interactive=False,
pbcor=True, interpolation='linear', usescratch=True,
phasecenter='J2000 01h33m50.904 +30d39m35.79', veltype='radio',
outframe='LSRK', modelimage=model_65, mask=mask_65)
if do_clean:
print 'Making cleaned cube.'
os.system('rm -rf '+out_root+'.clean')
parallel = False
if not parallel:
clean(vis=vis, imagename=out_root+'.clean', field='M33*',
restfreq='1420.40575177MHz',
mode='channel', width=1, nchan=205, start=10,
cell='1.5arcsec', multiscale=[0, 8, 20],
threshold='2.2mJy/beam', imagermode='mosaic', gain=0.5,
imsize=[4096, 4096], weighting='natural', robust=0.0, niter=50000,
pbcor=True, interpolation='linear', usescratch=True,
phasecenter='J2000 01h33m50.904 +30d39m35.79', veltype='radio',
outframe='LSRK', modelimage=model, mask=mask)
else:
pclean(vis=vis, imagename=out_root+'.clean', field='M33*',
restfreq='1420.40575177MHz',
mode='channel', width=1, nchan=205, start=10,
cell='1.5arcsec', alg='multiscale', scales=[0, 8, 20],
threshold='2.2mJy/beam', ftmachine='mosaic', gain=0.5,
imsize=[4096, 4096], weighting='natural', robust=0.0, niter=50000,
pbcor=True, interpolation='linear', usescratch=True,
phasecenter='J2000 01h33m50.904 +30d39m35.79', veltype='radio',
outframe='LSRK', modelimage=model, mask=mask,
clusterdef='')
if do_export:
print "Exporting fits files."
# Clean cube
exportfits(imagename=out_root+'.clean.image',
fitsimage=out_root+'.fits', overwrite=True,
velocity=True, dropstokes=True)
# Residual cube
exportfits(imagename=out_root+'.clean.residual',
fitsimage=out_root+'_resid.fits', overwrite=True,
velocity=True, dropstokes=True)
# Export the primary beam image for the cleaned cube
exportfits(imagename=out_root+'.clean.flux',
fitsimage=out_root+'_flux.fits', overwrite=True,
velocity=True, dropstokes=True)
|
{
"content_hash": "6a5ef05ac14b51c7ceb40106e8e3e6e3",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 87,
"avg_line_length": 37.04255319148936,
"alnum_prop": 0.6147807773310358,
"repo_name": "e-koch/canfar_scripts",
"id": "a2a0835c15a8d48447d9e6e2a3221399fd877689",
"size": "5224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "img_pipe/archival_data/m33_arch_206_all_img.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1022821"
},
{
"name": "Shell",
"bytes": "23747"
}
],
"symlink_target": ""
}
|
import ConfigParser
import os
import rdflib
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
try:
HUMFREY_CONFIG_FILE = os.environ['HUMFREY_CONFIG_FILE']
except KeyError:
raise RuntimeError('You need to provide a HUMFREY_CONFIG_FILE environment variable pointing to an ini file')
config = ConfigParser.ConfigParser()
config.read(HUMFREY_CONFIG_FILE)
def relative_path(*args):
# Return None if any of the arguments are None.
if all(args):
return os.path.abspath(os.path.join(os.path.dirname(HUMFREY_CONFIG_FILE), *args))
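# Flatten the ConfigParser object into a single dict keyed as "section:key".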
config = dict((':'.join([sec, key]), config.get(sec, key)) for sec in config.sections() for key in config.options(sec))
DEBUG = config.get('main:debug') == 'true'
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
#DATABASES = type('NonZeroDict', (dict,), {'__nonzero__': lambda self:True, '__contains__': lambda self, item: True})()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# Make this unique, and don't share it with anybody.
SECRET_KEY = config.get('main:secret_key')
if not SECRET_KEY:
raise RuntimeError("You need to specify a secret_key in your config.ini.")
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django_hosts.middleware.HostsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'humfrey.base.middleware.AccessControlAllowOriginMiddleware',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django_hosts',
'django_conneg',
'djcelery',
'object_permissions',
'humfrey.base',
'humfrey.desc',
'humfrey.linkeddata',
'humfrey.results',
'humfrey.sparql',
'humfrey.streaming',
'humfrey.thumbnail',
'humfrey.utils',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
IMAGE_TYPES = ('foaf:Image',)
IMAGE_PROPERTIES = ('foaf:depiction',)
# Pull e-mail configuration from config file.
EMAIL_HOST = config.get('email:host')
EMAIL_PORT = int(config.get('email:port') or 0) or None
EMAIL_HOST_USER = config.get('email:user')
EMAIL_HOST_PASSWORD = config.get('email:password')
SERVER_EMAIL = config.get('email:server_email_address')
DEFAULT_FROM_EMAIL = config.get('email:default_from_email_address')
# Endpoint details
ENDPOINT_QUERY = config.get('endpoints:query')
ENDPOINT_UPDATE = config.get('endpoints:update')
ENDPOINT_GRAPH = config.get('endpoints:graph')
CACHE_BACKEND = config.get('supporting_services:cache_backend') or 'locmem://'
# Cache directories
CACHE_DIRECTORY = relative_path(config.get('main:cache_directory'))
IMAGE_CACHE_DIRECTORY = relative_path(config.get('images:cache_directory')) \
or (CACHE_DIRECTORY and os.path.join(CACHE_DIRECTORY, 'images'))
UPDATE_CACHE_DIRECTORY = relative_path(config.get('update:cache_directory')) \
or (CACHE_DIRECTORY and os.path.join(CACHE_DIRECTORY, 'update'))
REDIS_PARAMS = {'host': config.get('supporting_services:redis_host') or 'localhost',
'port': int(config.get('supporting_services:redis_port') or 6379),
'db': int(config.get('supporting_services:redis_db') or 0)}
REDIS_PARAMS = {} if config.get('supporting_services:disable_redis_support') == 'true' else REDIS_PARAMS
# These will be linked directly, others will be described using /doc/?uri=… syntax.
SERVED_DOMAINS = ()
ID_MAPPING = ()
ADDITIONAL_NAMESPACES = {}
DOWNLOADER_DEFAULT_DIR = config.get('downloader:default_dir')
if DOWNLOADER_DEFAULT_DIR:
DOWNLOADER_DEFAULT_DIR = relative_path(DOWNLOADER_DEFAULT_DIR)
LOG_FILENAMES = {}
for k in ('access', 'pingback', 'query'):
v = config.get('logging:%s' % k, None)
if v:
v = relative_path(v)
LOG_FILENAMES[k] = v
del k, v
if config.get('main:log_to_stderr') == 'true':
import logging
import sys
log_level = config.get('main:log_level') or 'WARNING'
if log_level not in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):
raise RuntimeError('log_level in config file must be one of DEBUG, INFO, WARNING, ERROR and CRITICAL')
logging.basicConfig(stream=sys.stderr,
level=getattr(logging, log_level))
if config.get('google_analytics:key'):
INSTALLED_APPS += ('humfrey.analytics',)
TEMPLATE_CONTEXT_PROCESSORS += ('humfrey.analytics.context_processors.google_analytics',)
GOOGLE_ANALYTICS = {
'key': config['google_analytics:key'],
'zero_timeouts': config.get('google_analytics:zero_timeouts') == 'true',
}
DOC_RDF_PROCESSORS = (
'humfrey.desc.rdf_processors.doc_meta',
'humfrey.desc.rdf_processors.formats',
)
# Load pingback functionality if specified in the config.
if config.get('pingback:enabled') == 'true':
MIDDLEWARE_CLASSES += ('humfrey.pingback.middleware.PingbackMiddleware',)
INSTALLED_APPS += ('humfrey.pingback',)
DOC_RDF_PROCESSORS += ('humfrey.pingback.rdf_processors.pingback',)
PINGBACK_TARGET_DOMAINS = (config.get('pingback:target_domains') or '').split()
PINGBACK_DATASET = rdflib.URIRef(config['pingback:dataset'])
if config.get('update:enabled') == 'true':
INSTALLED_APPS += ('humfrey.update',)
UPDATE_TRANSFORMS = (
'humfrey.update.transform.base.Requires',
'humfrey.update.transform.construct.Construct',
'humfrey.update.transform.html.HTMLToXML',
'humfrey.update.transform.local_file.LocalFile',
'humfrey.update.transform.normalize.Normalize',
'humfrey.update.transform.retrieve.Retrieve',
'humfrey.update.transform.sharepoint.SharePoint',
'humfrey.update.transform.shell.Shell',
'humfrey.update.transform.spreadsheet.GnumericToTEI',
'humfrey.update.transform.spreadsheet.ODSToTEI',
'humfrey.update.transform.union.Union',
'humfrey.update.transform.upload.Upload',
'humfrey.update.transform.vocabularies.VocabularyLoader',
'humfrey.update.transform.xslt.XSLT',
)
UPDATE_TRANSFORM_REPOSITORY = config.get('update:transform_repository')
if config.get('ckan:enabled') == 'true':
CKAN_API_KEY = config.get('ckan:api_key')
CKAN_GROUPS = set()
CKAN_TAGS = set()
SPARQL_FORM_COMMON_PREFIXES = (config.get('sparql:form_common_prefixes') or 'true') == 'true'
CACHE_TIMES = {
'page': 1800,
}
CACHE_TIMES.update(dict((k[6:], int(v)) for k, v in config.iteritems() if k.startswith('cache:')))
GRAPH_BASE = config.get('main:graph_base') or 'http://localhost/graph/'
|
{
"content_hash": "9c210005f9702c8c8c3b87394ba8fa78",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 119,
"avg_line_length": 36.54708520179372,
"alnum_prop": 0.7012269938650306,
"repo_name": "ox-it/humfrey",
"id": "f84c54d8108584851b8a009fb79467f412196bf7",
"size": "8207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "humfrey/settings/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2990"
},
{
"name": "HTML",
"bytes": "39767"
},
{
"name": "JavaScript",
"bytes": "658"
},
{
"name": "Python",
"bytes": "375681"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from luigi import six
import luigi
import subprocess
import sys
class WikiJpTitles(luigi.ExternalTask):
def output(self):
return luigi.LocalTarget('data/jawiki-20141017-page.csv')
class WikiJpLangLinks(luigi.ExternalTask):
def output(self):
return luigi.LocalTarget('data/jawiki-20141017-langlinks.csv')
class P01_JpKrTitleJoin(luigi.Task):
def output(self):
return luigi.LocalTarget('work/01_jpkr_title_joined.csv')
def requires(self):
return [WikiJpTitles(),WikiJpLangLinks()]
def run(self):
jp_titles = {}
jp_lang_links = {}
jpkr_joined_titles = []
with self.input()[0].open('r') as in_file:
for line in in_file:
try:
record = line.strip().split(',')
jp_titles[int(record[0])] = record[1]
except ValueError, e:
continue
with self.input()[1].open('r') as in_file:
for line in in_file:
record = line.strip().split(',')
if record[1] != 'ko' :
continue
jp_lang_links[int(record[0])] = record[2]
for key in jp_titles.keys():
try :
jpkr_joined_titles.append([key,jp_titles[key],jp_lang_links[key]])
except IndexError, e:
continue
except KeyError, e:
continue
with self.output().open('w') as out_file:
for joined_title in jpkr_joined_titles:
print >> out_file, joined_title[1] + ',' + joined_title[2]
class P02_GenerateKanjiHanjaDic(luigi.Task):
target = 'work/02_jpkr_kanji_hanja_dic.csv'
def output(self):
return luigi.LocalTarget(self.target)
def requires(self):
return P01_JpKrTitleJoin()
def run(self):
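        # Pipe the upstream task's output file into the ruby step and capture
        # its stdout as this task's output.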
proc = subprocess.Popen(['ruby','02_generate_kanji_hanja_dic.rb'],stdin=self.input().open('r'),stdout=subprocess.PIPE)
with self.output().open('w') as out_file:
ret = proc.communicate()[0]
print >> out_file, ret
class P03_AppendFieldKanji2Hangul(luigi.Task):
target = 'work/03_jpkr_kanji_hanja_dic_kanjihangul.csv'
def output(self):
return luigi.LocalTarget(self.target)
def requires(self):
return P02_GenerateKanjiHanjaDic()
def run(self):
proc = subprocess.Popen(['ruby','03_append_field_kanji2hangul.rb'],stdin=self.input().open('r'),stdout=subprocess.PIPE)
with self.output().open('w') as out_file:
ret = proc.communicate()[0]
print >> out_file, ret
class P04_ExtractKanjiSamePronunciation(luigi.Task):
target = 'work/04_jpkr_kanji_hanja_dic_extracted.csv'
def output(self):
return luigi.LocalTarget(self.target)
def requires(self):
return P03_AppendFieldKanji2Hangul()
def run(self):
proc = subprocess.Popen(['ruby','04_extract_kanji_same_pronunciation.rb'],stdin=self.input().open('r'),stdout=subprocess.PIPE)
with self.output().open('w') as out_file:
ret = proc.communicate()[0]
print >> out_file, ret
class P05_KanaWakati(luigi.Task):
target = 'work/05_jpkr_kanji_hanja_dic_kana_wakati.csv'
def output(self):
return luigi.LocalTarget(self.target)
def requires(self):
return P04_ExtractKanjiSamePronunciation()
def run(self):
proc = subprocess.Popen(['ruby','05_kana_wakati.rb'],stdin=self.input().open('r'),stdout=subprocess.PIPE)
with self.output().open('w') as out_file:
ret = proc.communicate()[0]
print >> out_file, ret
class P06_ParsePronunciation(luigi.Task):
target = 'work/06_jpkr_kanji_hanja_dic_parsed_pronunciation.csv'
def output(self):
return luigi.LocalTarget(self.target)
def requires(self):
return P05_KanaWakati()
def run(self):
proc = subprocess.Popen(['ruby','06_parse_pronunciation.rb'],stdin=self.input().open('r'),stdout=subprocess.PIPE)
with self.output().open('w') as out_file:
ret = proc.communicate()[0]
print >> out_file, ret
class P07_CleansingAndAppendLabels(luigi.Task):
target = 'work/07_jpkr_kanji_hanja_dic.csv'
def output(self):
return luigi.LocalTarget(self.target)
def requires(self):
return P06_ParsePronunciation()
def run(self):
proc = subprocess.Popen(['ruby','07_cleansing_append_labels.rb'],stdin=self.input().open('r'),stdout=subprocess.PIPE)
with self.output().open('w') as out_file:
ret = proc.communicate()[0]
print >> out_file, ret
if __name__ == '__main__':
luigi.run(main_task_cls=P07_CleansingAndAppendLabels,local_scheduler=True )
|
{
"content_hash": "18c9d05219903ff4df06d13173b62fee",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 130,
"avg_line_length": 35.47154471544715,
"alnum_prop": 0.6704102681641073,
"repo_name": "colspan/wikipedia-kanji-hanja-analysis",
"id": "ae3049c771d2bb70975f47e25201fc43341fbaf2",
"size": "4406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikipedia-jpkr-analysis.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4406"
},
{
"name": "Ruby",
"bytes": "103713"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
}
|
from pydantic import BaseModel
from typing import List, Optional, Union
from sqlalchemy.sql.sqltypes import Enum
class AnimParam(BaseModel):
name: str
data_type: Union[str, int, bool, float]
value: Union[str, int, bool, float]
default_value: Optional[Union[str, int, bool, float]]
class AnimationParams(BaseModel):
required: List[AnimParam]
optional: Optional[List[AnimParam]]
|
{
"content_hash": "ffd100ed00eee0d592664ddd691f095a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 57,
"avg_line_length": 25.375,
"alnum_prop": 0.7315270935960592,
"repo_name": "skypanther/clc",
"id": "b1a72e39c268c10b80e56a5b945c452d192837fd",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/pilitgui2",
"path": "pilitgui2/api/schemas/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6781"
},
{
"name": "JavaScript",
"bytes": "44195"
}
],
"symlink_target": ""
}
|
"""This example adds a text add that uses advanced features of upgraded URLS.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupAdService.mutate
"""
__author__ = 'Mark Saniscalchi'
from googleads import adwords
from googleads import errors
ADGROUP_ID = 'INSERT_ADGROUP_ID_HERE'
def main(client, adgroup_id):
# Initialize appropriate service.
adgroup_ad_service = client.GetService('AdGroupAdService', version='v201502')
# Create the text ad
text_ad = {
'xsi_type': 'TextAd',
'headline': 'Luxury Cruise to Mars',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'displayUrl': 'www.example.com',
# Specify a tracking URL for 3rd party tracking provider. You may specify
# one at customer, campaign, ad group, ad, criterion or feed item levels.
'trackingUrlTemplate': ('http://tracker.example.com/?season={_season}'
'&promocode={_promocode}&u={lpurl}'),
'urlCustomParameters': {
'parameters': [
# Since your tracking URL has two custom parameters, provide
# their values too. This can be provided at campaign, ad group,
# ad, criterion, or feed item levels.
{
'key': 'season',
'value': 'christmas'
},
{
'key': 'promocode',
'value': 'NYC123'
}
]
},
# Specify a list of final URLs. This field cannot be set if URL
# field is set, or finalUrls is unset. This may be specified at ad,
# criterion, and feed item levels.
'finalUrls': [
'http://www.example.com/cruise/space/',
'http://www.example.com/locations/mars/'
],
# Specify a list of final mobile URLs. This field cannot be set if URL
# field is set, or finalUrls is unset. This may be specified at ad,
# criterion, and feed item levels.
'finalMobileUrls': [
'http://mobile.example.com/cruise/space/',
'http://mobile.example.com/locations/mars/'
]
}
text_adgroup_ad = {
'adGroupId': adgroup_id,
'ad': text_ad,
# Optional: Set the status.
'status': 'PAUSED'
}
operations = [{
'operator': 'ADD',
'operand': text_adgroup_ad
}]
response = adgroup_ad_service.mutate(operations)
if 'value' in response:
for adgroup_ad in response['value']:
print ('AdGroupAd with ID %s and display URL \'%s\'was added.'
% (adgroup_ad['ad']['id'], adgroup_ad['ad']['displayUrl']))
print 'Upgraded URL properties:'
print 'Final Urls: %s' % adgroup_ad['ad']['finalUrls']
print 'Final Mobile URLs: %s' % adgroup_ad['ad']['finalMobileUrls']
print ('Tracking URL template: %s'
% adgroup_ad['ad']['trackingUrlTemplate'])
print 'Custom parameters: %s' % adgroup_ad['ad']['urlCustomParameters']
else:
raise errors.GoogleAdsError('Failed to create AdGroupAd.')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUP_ID)
|
{
"content_hash": "3d95ee48819a2f24c98ea747c23d7e74",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 34.785714285714285,
"alnum_prop": 0.612496333235553,
"repo_name": "cctaylor/googleads-python-lib",
"id": "a3032cb26e530e826aefb60b385e7e106942a249",
"size": "4027",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adwords/v201502/advanced_operations/add_text_ad_with_upgraded_urls.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2774292"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse, HttpRequest, HttpResponseRedirect
from django.template import RequestContext, Template, Context
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.http import Http404
#from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
from django import forms
from django.db.models import Q
from django.db.models import Count
from server import utils
from django.shortcuts import render_to_response, get_object_or_404, redirect
import unicodecsv as csv
import plistlib
import base64
import bz2
import hashlib
import json
from datetime import datetime
import urllib2
from xml.etree import ElementTree
from models import *
from server.models import *
def is_postgres():
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql_psycopg2':
return True
else:
return False
def decode_to_string(base64bz2data):
'''Decodes an inventory submission, which is a plist-encoded
list, compressed via bz2 and base64 encoded.'''
try:
bz2data = base64.b64decode(base64bz2data)
return bz2.decompress(bz2data)
except Exception:
return ''
def unique_apps(inventory, input_type='object'):
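    '''Deduplicates inventory items on (name, version, bundleid, bundlename,
    path), accepting either dicts or InventoryItem-like objects.'''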
found = []
for inventory_item in inventory:
found_flag = False
if input_type == 'dict':
for found_item in found:
if (inventory_item['name'] == found_item['name'] and
inventory_item['version'] == found_item['version'] and
inventory_item['bundleid'] == found_item['bundleid'] and
inventory_item['bundlename'] == found_item['bundlename'] and
inventory_item['path'] == found_item['path']):
found_flag = True
break
if found_flag == False:
found_item = {}
found_item['name'] = inventory_item['name']
found_item['version'] = inventory_item['version']
found_item['bundleid'] = inventory_item['bundleid']
found_item['bundlename'] = inventory_item['bundlename']
found_item['path'] = inventory_item['path']
found.append(found_item)
else:
for found_item in found:
if (inventory_item.name == found_item['name'] and
inventory_item.version == found_item['version'] and
inventory_item.bundleid == found_item['bundleid'] and
inventory_item.bundlename == found_item['bundlename'] and
inventory_item.path == found_item['path']):
found_flag = True
break
if found_flag == False:
found_item = {}
found_item['name'] = inventory_item.name
found_item['version'] = inventory_item.version
found_item['bundleid'] = inventory_item.bundleid
found_item['bundlename'] = inventory_item.bundlename
found_item['path'] = inventory_item.path
found.append(found_item)
return found
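# Example of unique_apps with dict input (a sketch; the rows are hypothetical).
# Two rows that agree on name, version, bundleid, bundlename and path collapse
# to a single entry:
#
#   rows = [
#       {'name': 'Safari', 'version': '7.0', 'bundleid': 'com.apple.Safari',
#        'bundlename': 'Safari', 'path': '/Applications/Safari.app'},
#       {'name': 'Safari', 'version': '7.0', 'bundleid': 'com.apple.Safari',
#        'bundlename': 'Safari', 'path': '/Applications/Safari.app'},
#   ]
#   assert len(unique_apps(rows, input_type='dict')) == 1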
@login_required
def inventory_list(request, page='front', theID=None):
user = request.user
title=None
inventory_name = request.GET.get('name')
inventory_version = request.GET.get('version', '0')
inventory_bundleid = request.GET.get('bundleid', '')
inventory_path = request.GET.get('path')
inventory_bundlename = request.GET.get('bundlename','')
# get a list of machines (either from the BU or the group)
if page == 'front':
# get all machines
if user.userprofile.level == 'GA':
machines = Machine.objects.all()
else:
machines = Machine.objects.none()
for business_unit in user.businessunit_set.all():
for group in business_unit.machinegroup_set.all():
machines = machines | group.machine_set.all()
if page == 'bu_dashboard':
# only get machines for that BU
# Need to make sure the user is allowed to see this
machines = utils.getBUmachines(theID)
if page == 'group_dashboard' or page == 'machine_group':
# only get machines from that group
machine_group = get_object_or_404(MachineGroup, pk=theID)
# check that the user has access to this
machines = Machine.objects.filter(machine_group=machine_group)
if page == 'machine_id':
machines = Machine.objects.filter(id=theID)
try:
page = int(request.GET.get('page'))
except:
page = 1
previous_id = page - 1
next_id = page + 1
start = (page - 1) * 25
end = page * 25
# get the InventoryItems limited to the machines we're allowed to look at
inventory = InventoryItem.objects.filter(name=inventory_name, version=inventory_version, bundleid=inventory_bundleid, bundlename=inventory_bundlename).filter(machine=machines)[start:end]
if len(inventory) != 25:
# we've not got 25 results, probably the last page
next_id = 0
c = {'user':user, 'machines': machines, 'req_type': page, 'title': title, 'bu_id': theID, 'request':request, 'inventory_name':inventory_name, 'inventory_version':inventory_version, 'inventory_bundleid':inventory_bundleid, 'inventory_bundlename':inventory_bundlename, 'previous_id': previous_id, 'next_id':next_id, 'inventory':inventory }
return render_to_response('inventory/overview_list_all.html', c, context_instance=RequestContext(request))
@csrf_exempt
def inventory_submit(request):
if request.method != 'POST':
raise Http404
# list of bundleids to ignore
bundleid_ignorelist = [
'com.apple.print.PrinterProxy'
]
submission = request.POST
serial = submission.get('serial')
machine = None
if serial:
try:
machine = Machine.objects.get(serial=serial)
except Machine.DoesNotExist:
raise Http404
compressed_inventory = submission.get('base64bz2inventory')
if compressed_inventory:
compressed_inventory = compressed_inventory.replace(" ", "+")
inventory_str = decode_to_string(compressed_inventory)
try:
inventory_list = plistlib.readPlistFromString(inventory_str)
except Exception:
inventory_list = None
if inventory_list:
try:
inventory_meta = Inventory.objects.get(machine=machine)
except Inventory.DoesNotExist:
inventory_meta = Inventory(machine=machine)
inventory_meta.sha256hash = \
hashlib.sha256(inventory_str).hexdigest()
# clear existing inventoryitems
machine.inventoryitem_set.all().delete()
# insert current inventory items
for item in inventory_list:
# skip items in bundleid_ignorelist.
if not item.get('bundleid') in bundleid_ignorelist:
i_item = machine.inventoryitem_set.create(
name=item.get('name', ''),
version=item.get('version', ''),
bundleid=item.get('bundleid', ''),
bundlename=item.get('CFBundleName', ''),
path=item.get('path', '')
)
machine.last_inventory_update = datetime.now()
inventory_meta.save()
machine.save()
return HttpResponse(
"Inventory submmitted for %s.\n" %
submission.get('serial'))
return HttpResponse("No inventory submitted.\n")
@csrf_exempt
def inventory_hash(request, serial):
sha256hash = ''
machine = None
if serial:
try:
machine = Machine.objects.get(serial=serial)
inventory_meta = Inventory.objects.get(machine=machine)
sha256hash = inventory_meta.sha256hash
except (Machine.DoesNotExist, Inventory.DoesNotExist):
pass
else:
return HttpResponse("MACHINE NOT FOUND")
return HttpResponse(sha256hash)
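# Hypothetical client-side use of inventory_hash (URL and serial are
# placeholders): compare the server's stored hash against the local one and
# only resubmit when they differ.
#
#   local_hash = hashlib.sha256(inventory_str).hexdigest()
#   remote_hash = urllib2.urlopen(
#       'https://sal.example.com/inventory/hash/C02XXXXXXXXX/').read()
#   if remote_hash != local_hash:
#       submit_inventory()  # hypothetical helper wrapping the POST above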
@login_required
def index(request):
# This really should just select on the BU's the user has access to like the
# Main page, but this will do for now
user = request.user
user_level = user.userprofile.level
if user_level != 'GA':
return redirect(index)
try:
page = int(request.GET.get('page'))
except:
page = 1
previous_id = page - 1
next_id = page + 1
start = (page - 1) * 25
end = page * 25
#inventory = InventoryItem.objects.all().values('name', 'version', 'path', 'bundleid', 'bundlename', 'id').order_by('name')
if is_postgres():
# Woohoo, you're using postgres. Let's make this fast.
print 'postgres'
inventory = InventoryItem.objects.all().values('name', 'version', 'path', 'bundleid', 'bundlename').distinct()[start:end]
else:
# Sucks to be you, you're on something else
inventory = InventoryItem.objects.all().values('name', 'version', 'path', 'bundleid', 'bundlename').distinct()
inventory = unique_apps(inventory,'dict')[start:end]
if len(inventory) != 25:
# we've not got 25 results, probably the last page
next_id = 0
c = {'user': request.user, 'inventory': inventory, 'page':'front', 'request': request, 'previous_id': previous_id, 'next_id':next_id}
return render_to_response('inventory/index.html', c, context_instance=RequestContext(request))
@login_required
def bu_inventory(request, bu_id):
user = request.user
user_level = user.userprofile.level
business_unit = get_object_or_404(BusinessUnit, pk=bu_id)
if business_unit not in user.businessunit_set.all() and user_level != 'GA':
print 'not letting you in ' + user_level
return redirect(index)
try:
page = int(request.GET.get('page'))
except:
page = 1
previous_id = page - 1
next_id = page + 1
start = (page - 1) * 25
end = page * 25
if is_postgres():
inventory = InventoryItem.objects.filter(machine__machine_group__business_unit=business_unit).values('name', 'version', 'path', 'bundleid', 'bundlename').distinct()[start:end]
else:
inventory = InventoryItem.objects.filter(machine__machine_group__business_unit=business_unit).values('name', 'version', 'path', 'bundleid', 'bundlename')
inventory = unique_apps(inventory, 'dict')[start:end]
if len(inventory) != 25:
# we've not got 25 results, probably the last page
next_id = 0
c = {'user': request.user, 'inventory': inventory, 'page':'business_unit', 'business_unit':business_unit, 'request': request, 'previous_id': previous_id, 'next_id':next_id}
return render_to_response('inventory/index.html', c, context_instance=RequestContext(request))
@login_required
def machine_group_inventory(request, group_id):
user = request.user
user_level = user.userprofile.level
machine_group = get_object_or_404(MachineGroup, pk=group_id)
business_unit = machine_group.business_unit
if business_unit not in user.businessunit_set.all() and user_level != 'GA':
print 'not letting you in ' + user_level
return redirect(index)
try:
page = int(request.GET.get('page'))
except:
page = 1
previous_id = page - 1
next_id = page + 1
start = (page - 1) * 25
end = page * 25
if is_postgres():
inventory = InventoryItem.objects.filter(machine__machine_group=machine_group).values('name', 'version', 'path', 'bundleid', 'bundlename').distinct()[start:end]
else:
inventory = InventoryItem.objects.filter(machine__machine_group=machine_group).values('name', 'version', 'path', 'bundleid', 'bundlename')
inventory = unique_apps(inventory, 'dict')[start:end]
if len(inventory) != 25:
# we've not got 25 results, probably the last page
next_id = 0
c = {'user': request.user, 'inventory': inventory, 'page':'machine_group', 'business_unit':business_unit,'machine_group':machine_group, 'request': request, 'previous_id': previous_id, 'next_id':next_id}
return render_to_response('inventory/index.html', c, context_instance=RequestContext(request))
@login_required
def machine_inventory(request, machine_id):
user = request.user
user_level = user.userprofile.level
machine = get_object_or_404(Machine, pk=machine_id)
machine_group = machine.machine_group
business_unit = machine_group.business_unit
if business_unit not in user.businessunit_set.all() and user_level != 'GA':
print 'not letting you in ' + user_level
return redirect(index)
try:
page = int(request.GET.get('page'))
except:
page = 1
previous_id = page - 1
next_id = page + 1
start = (page - 1) * 25
end = page * 25
if is_postgres():
inventory = InventoryItem.objects.filter(machine=machine).values('name', 'version', 'path', 'bundleid', 'bundlename').distinct()[start:end]
else:
inventory = InventoryItem.objects.filter(machine=machine).values('name', 'version', 'path', 'bundleid', 'bundlename')
inventory = unique_apps(inventory, 'dict')[start:end]
if len(inventory) != 25:
# we've not got 25 results, probably the last page
next_id = 0
c = {'user': request.user, 'inventory': inventory, 'page':'machine_id', 'business_unit':business_unit,'machine':machine, 'request': request, 'previous_id': previous_id, 'next_id':next_id}
return render_to_response('inventory/index.html', c, context_instance=RequestContext(request))
@login_required
def export_csv(request, page='front', theID=None):
user = request.user
title = 'Inventory Export'
inventory_name = request.GET.get('name')
inventory_version = request.GET.get('version', '0')
inventory_bundleid = request.GET.get('bundleid', '')
inventory_path = request.GET.get('path')
inventory_bundlename = request.GET.get('bundlename','')
# get a list of machines (either from the BU or the group)
if page == 'front':
# get all machines
if user.userprofile.level == 'GA':
machines = Machine.objects.all()
else:
machines = Machine.objects.none()
for business_unit in user.businessunit_set.all():
for group in business_unit.machinegroup_set.all():
machines = machines | group.machine_set.all()
if page == 'bu_dashboard':
# only get machines for that BU
# Need to make sure the user is allowed to see this
business_unit = get_object_or_404(BusinessUnit, pk=theID)
machine_groups = MachineGroup.objects.filter(business_unit=business_unit).prefetch_related('machine_set').all()
if machine_groups.count() != 0:
machines_unsorted = machine_groups[0].machine_set.all()
for machine_group in machine_groups[1:]:
machines_unsorted = machines_unsorted | machine_group.machine_set.all()
else:
machines_unsorted = None
machines=machines_unsorted
if page == 'group_dashboard':
# only get machines from that group
machine_group = get_object_or_404(MachineGroup, pk=theID)
# check that the user has access to this
machines = Machine.objects.filter(machine_group=machine_group)
if page == 'machine_id':
machines = Machine.objects.filter(id=theID)
# get the InventoryItems limited to the machines we're allowed to look at
inventoryitems = InventoryItem.objects.filter(name=inventory_name, version=inventory_version, bundleid=inventory_bundleid, bundlename=inventory_bundlename).filter(machine=machines).order_by('name')
machines = machines.filter(inventoryitem__name=inventory_name, inventoryitem__version=inventory_version, inventoryitem__bundleid=inventory_bundleid, inventoryitem__bundlename=inventory_bundlename)
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % title
writer = csv.writer(response)
# Fields
header_row = []
fields = Machine._meta.get_fields()
for field in fields:
if not field.is_relation and field.name != 'id' and field.name != 'report' and field.name != 'activity' and field.name != 'os_family':
header_row.append(field.name)
header_row.append('business_unit')
header_row.append('machine_group')
writer.writerow(header_row)
for machine in machines:
row = []
for name, value in machine.get_fields():
if name != 'id' and name !='machine_group' and name != 'report' and name != 'activity' and name != 'os_family':
row.append(value.strip())
row.append(machine.machine_group.business_unit.name)
row.append(machine.machine_group.name)
writer.writerow(row)
#writer.writerow([machine.serial, machine.machine_group.business_unit.name, machine.machine_group.name,
#machine.hostname, machine.operating_system, machine.memory, machine.memory_kb, machine.munki_version, machine.manifest])
return response
@login_required
def list_machines(request, page, name, version, bundleid, bundlename, path, id=None):
user = request.user
user_level = user.userprofile.level
machines = Machine.objects.all()
if page == 'group':
group = get_object_or_404(MachineGroup, pk=id)
machines = machines.filter(machine_group=group)
elif page == 'bu':
business_unit = get_object_or_404(BusinessUnit, pk=id)
        machines = machines.filter(machine_group__business_unit=business_unit)
else:
if user_level == 'GA':
machines = machines
else:
for business_unit in BusinessUnit.objects.all():
if business_unit not in user.businessunit_set.all():
machines = machines.exclude(machine_group__business_unit = business_unit)
|
{
"content_hash": "3144968addbe34f11788d9b876388d9a",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 341,
"avg_line_length": 42.36009174311926,
"alnum_prop": 0.6331690941577779,
"repo_name": "chasetb/sal",
"id": "d2a749daceae5b4d0a516bf8bc1f36e905345c3e",
"size": "18469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "192288"
},
{
"name": "HTML",
"bytes": "119776"
},
{
"name": "JavaScript",
"bytes": "683793"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Nginx",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "346909"
},
{
"name": "Shell",
"bytes": "1964"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render_to_response, render
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib import auth, messages
from sms.models import *
import logging
import traceback
from django_twilio.decorators import twilio_view
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.forms.models import inlineformset_factory
@twilio_view
def inbound(request):
try:
i = Interaction(request.REQUEST.get('From', ''), request.REQUEST.get('To', ''), request.REQUEST.get('Body', ''))
return i.process()
except:
logging.warn('captured error at main level: %s', traceback.format_exc())
return
def login(request):
context = {}
populateContext(request, context)
if context['authenticated'] == True:
return HttpResponseRedirect('/dashboard/')
if request.method == 'POST':
try:
context['username'] = request.POST['inputUsername']
password = request.POST['inputPassword']
user = auth.authenticate(username=context['username'], password=password)
if user is not None:
auth.login(request, user)
return HttpResponseRedirect('/dashboard/')
else:
context['error'] = 'Username and/or Password are invalid.'
except:
context['error'] = 'Username and/or Password are invalid.'
return render(request,
'login.html',
context)
def register(request):
context = {}
populateContext(request, context)
if request.method == 'POST':
context['user_form'] = UserForm(data=request.POST)
context['info_form'] = UserInfoForm(data=request.POST)
if context['user_form'].is_valid() and context['info_form'].is_valid():
user = context['user_form'].save()
user.set_password(user.password)
user.save()
info = context['info_form'].save(commit=False)
info.user = user
info.save()
## create phoneNumbers from twi account
tw = TwiAuth(user = info)
tw.getNumbers()
return HttpResponseRedirect('/login/')
else:
print context['user_form'].errors, context['info_form'].errors
else:
context['user_form'] = UserForm()
context['info_form'] = UserInfoForm()
return render(request,
'signup.html',
context)
def home(request):
context = {}
populateContext(request, context)
return render(request,
'index.html',
context)
def logout(request):
auth.logout(request)
return HttpResponseRedirect('/')
@login_required
def dashboard(request):
context = {}
populateContext(request,context)
return render_to_response('dashboard.html', context)
@login_required
def stats(request):
context = {}
populateContext(request,context)
context['statsList'] = []
for s in context['user'].getSurveys():
context['statsList'].append(s.getStats())
return render_to_response('stat.html', context)
@login_required
def surveys(request, id=False):
context = {}
populateContext(request, context)
if request.method == 'GET' and id:
try:
context['survey'] = Survey.getById(id, request.user)
return render_to_response('survey.html', context)
except:
return HttpResponseRedirect('/surveys/')
else:
context['surveys'] = request.user.info.getSurveys()
return render_to_response('surveys.html', context)
@login_required
def questions(request, id, qid):
context = {}
populateContext(request, context)
context['survey'] = Survey.getById(id, request.user)
context['question'] = context['survey'].getQuestionById(qid)
if request.method == "POST":
context['form'] = OptionForm(data=request.POST)
if context['form'].is_valid():
option = context['form'].save(commit=False)
option.question = context['question']
option.save()
return HttpResponseRedirect('/surveys/{0}/questions/{1}/'.format(context['survey'].id, context['question'].id))
else:
print context['form'].errors
else:
context['form'] = OptionForm(initial={'question':context['question'].id})
return render(request,
'question.html',
context)
@login_required
def newquestion(request, id):
context = {}
populateContext(request, context)
survey = "";
try:
survey = Survey.getById(id, request.user)
context['id'] = survey.id
except: #TODO: get specific kind of exception
return HttpResponseRedirect('/surveys/')
if request.method == 'POST':
context['form'] = QuestionForm(data=request.POST)
if context['form'].is_valid():
question = context['form'].save(commit=False)
order = survey.getQuestionsCount() + 1
question.order = order
question.survey = survey
question.save()
return HttpResponseRedirect('/surveys/%s/' % context['id'])
else:
print context['form'].errors
else:
context['form'] = QuestionForm(initial={'survey':context['id']})
return render(request,
'createquestion.html',
context)
@login_required
def newsurvey(request, extra=False):
context = {}
populateContext(request, context)
if request.method == 'POST':
context['form'] = SurveyForm(data=request.POST)
if context['form'].is_valid():
context['form'].save()
return HttpResponseRedirect('/surveys/')
else:
print context['form'].errors
else:
context['form'] = SurveyForm()
return render(request,
'createsurvey.html',
context)
@login_required
def deletesurvey(request, id):
try:
survey = Survey.getById(id, request.user)
survey.remove()
except:
print traceback.format_exc()
return HttpResponseRedirect('/surveys/')
@login_required
def deletequestion(request, id, qid):
try:
survey = Survey.getById(id, request.user)
question = survey.getQuestionById(qid)
question.remove()
except:
print traceback.format_exc()
return HttpResponseRedirect('/surveys/{0}/'.format(id))
@login_required
def deleteoption(request, id, qid, oid):
try:
survey = Survey.getById(id, request.user)
question = survey.getQuestionById(qid)
option = question.getOptionById(oid)
option.remove()
except:
print traceback.format_exc()
return HttpResponseRedirect('/surveys/{0}/questions/{1}/'.format(id, qid))
def populateContext(request, context):
context['authenticated'] = request.user.is_authenticated()
if context['authenticated'] == True:
context['user'] = request.user.info
|
{
"content_hash": "b21e7a5f185b9649d37a9d959c21bcba",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 123,
"avg_line_length": 33.023809523809526,
"alnum_prop": 0.6256669069935111,
"repo_name": "cauanicastro/smssurveytools",
"id": "3c67bc73eb5492af36938e2752ad64cc066ad72e",
"size": "6935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sms/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "875"
},
{
"name": "C",
"bytes": "447472"
},
{
"name": "C++",
"bytes": "2005"
},
{
"name": "CSS",
"bytes": "792"
},
{
"name": "HTML",
"bytes": "15815"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "37778"
},
{
"name": "Shell",
"bytes": "3719"
}
],
"symlink_target": ""
}
|
import mmap
import sys
import warnings
import numpy as np
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
_is_dask_array,
_is_int,
_is_pseudo_integer,
_pseudo_zero,
)
from astropy.io.fits.verify import VerifyWarning
from astropy.utils import isiterable, lazyproperty
from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU, _ValidHDU
__all__ = ["Section", "PrimaryHDU", "ImageHDU"]
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class.
Attributes
----------
header
image header
data
image data
"""
standard_keyword_comments = {
"SIMPLE": "conforms to FITS standard",
"XTENSION": "Image extension",
"BITPIX": "array data type",
"NAXIS": "number of array dimensions",
"GROUPS": "has groups",
"PCOUNT": "number of parameters",
"GCOUNT": "number of groups",
}
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
uint=True,
scale_back=False,
ignore_blank=False,
**kwargs,
):
from .groups import GroupsHDU
super().__init__(data=data, header=header)
if data is DELAYED:
# Presumably if data is DELAYED then this HDU is coming from an
# open file, and was not created in memory
if header is None:
# this should never happen
raise ValueError("No header to setup HDU.")
else:
# TODO: Some of this card manipulation should go into the
# PrimaryHDU and GroupsHDU subclasses
# construct a list of cards of minimal header
if isinstance(self, ExtensionHDU):
c0 = ("XTENSION", "IMAGE", self.standard_keyword_comments["XTENSION"])
else:
c0 = ("SIMPLE", True, self.standard_keyword_comments["SIMPLE"])
cards = [
c0,
("BITPIX", 8, self.standard_keyword_comments["BITPIX"]),
("NAXIS", 0, self.standard_keyword_comments["NAXIS"]),
]
if isinstance(self, GroupsHDU):
cards.append(("GROUPS", True, self.standard_keyword_comments["GROUPS"]))
if isinstance(self, (ExtensionHDU, GroupsHDU)):
cards.append(("PCOUNT", 0, self.standard_keyword_comments["PCOUNT"]))
cards.append(("GCOUNT", 1, self.standard_keyword_comments["GCOUNT"]))
if header is not None:
orig = header.copy()
header = Header(cards)
header.extend(orig, strip=True, update=True, end=True)
else:
header = Header(cards)
self._header = header
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
# Keep track of whether BZERO/BSCALE were set from the header so that
# values for self._orig_bzero and self._orig_bscale can be set
# properly, if necessary, once the data has been set.
bzero_in_header = "BZERO" in self._header
bscale_in_header = "BSCALE" in self._header
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
# Save off other important values from the header needed to interpret
# the image data
self._axes = [
self._header.get("NAXIS" + str(axis + 1), 0)
for axis in range(self._header.get("NAXIS", 0))
]
# Not supplying a default for BITPIX makes sense because BITPIX
# is either in the header or should be determined from the dtype of
# the data (which occurs when the data is set).
self._bitpix = self._header.get("BITPIX")
self._gcount = self._header.get("GCOUNT", 1)
self._pcount = self._header.get("PCOUNT", 0)
self._blank = None if ignore_blank else self._header.get("BLANK")
self._verify_blank()
self._orig_bitpix = self._bitpix
self._orig_blank = self._header.get("BLANK")
# These get set again below, but need to be set to sensible defaults
# here.
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# Set the name attribute if it was provided (if this is an ImageHDU
# this will result in setting the EXTNAME keyword of the header as
# well)
if "name" in kwargs and kwargs["name"]:
self.name = kwargs["name"]
if "ver" in kwargs and kwargs["ver"]:
self.ver = kwargs["ver"]
# Set to True if the data or header is replaced, indicating that
# update_header should be called
self._modified = False
if data is DELAYED:
if not do_not_scale_image_data and (self._bscale != 1 or self._bzero != 0):
# This indicates that when the data is accessed or written out
# to a new file it will need to be rescaled
self._data_needs_rescale = True
return
else:
# Setting data will update the header and set _bitpix, _bzero,
# and _bscale to the appropriate BITPIX for the data, and always
# sets _bzero=0 and _bscale=1.
self.data = data
# Check again for BITPIX/BSCALE/BZERO in case they changed when the
# data was assigned. This can happen, for example, if the input
# data is an unsigned int numpy array.
self._bitpix = self._header.get("BITPIX")
# Do not provide default values for BZERO and BSCALE here because
# the keywords will have been deleted in the header if appropriate
# after scaling. We do not want to put them back in if they
# should not be there.
self._bzero = self._header.get("BZERO")
self._bscale = self._header.get("BSCALE")
# Handle case where there was no BZERO/BSCALE in the initial header
# but there should be a BSCALE/BZERO now that the data has been set.
if not bzero_in_header:
self._orig_bzero = self._bzero
if not bscale_in_header:
self._orig_bscale = self._bscale
@classmethod
def match_header(cls, header):
"""
_ImageBaseHDU is sort of an abstract class for HDUs containing image
data (as opposed to table data) and should never be used directly.
"""
raise NotImplementedError
@property
def is_image(self):
return True
@property
def section(self):
"""
Access a section of the image array without loading the entire array
into memory. The :class:`Section` object returned by this attribute is
not meant to be used directly by itself. Rather, slices of the section
return the appropriate slice of the data, and loads *only* that section
into memory.
Sections are useful for retrieving a small subset of data from a remote
file that has been opened with the ``use_fsspec=True`` parameter.
For example, you can use this feature to download a small cutout from
a large FITS image hosted in the Amazon S3 cloud (see the
:ref:`astropy:fits-cloud-files` section of the Astropy
documentation for more details.)
For local files, sections are mostly obsoleted by memmap support, but
should still be used to deal with very large scaled images.
Note that sections cannot currently be written to. Moreover, any
in-memory updates to the image's ``.data`` property may not be
reflected in the slices obtained via ``.section``. See the
:ref:`astropy:data-sections` section of the documentation for
more details.
"""
return Section(self)
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
self._modified = True
self.update_header()
@lazyproperty
def data(self):
"""
Image/array data as a `~numpy.ndarray`.
Please remember that the order of axes on an Numpy array are opposite
of the order specified in the FITS file. For example for a 2D image
the "rows" or y-axis are the first dimension, and the "columns" or
x-axis are the second dimension.
If the data is scaled using the BZERO and BSCALE parameters, this
attribute returns the data scaled to its physical values unless the
file was opened with ``do_not_scale_image_data=True``.
"""
if len(self._axes) < 1:
return
data = self._get_scaled_image_data(self._data_offset, self.shape)
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if "data" in self.__dict__ and self.__dict__["data"] is not None:
if self.__dict__["data"] is data:
return
else:
self._data_replaced = True
was_unsigned = _is_pseudo_integer(self.__dict__["data"].dtype)
else:
self._data_replaced = True
was_unsigned = False
if (
data is not None
and not isinstance(data, np.ndarray)
and not _is_dask_array(data)
):
# Try to coerce the data into a numpy array--this will work, on
# some level, for most objects
try:
data = np.array(data)
except Exception:
raise TypeError(
f"data object {data!r} could not be coerced into an ndarray"
)
if data.shape == ():
raise TypeError(
f"data object {data!r} should have at least one dimension"
)
self.__dict__["data"] = data
self._modified = True
if self.data is None:
self._axes = []
else:
# Set new values of bitpix, bzero, and bscale now, but wait to
# revise original values until header is updated.
self._bitpix = DTYPE2BITPIX[data.dtype.name]
self._bscale = 1
self._bzero = 0
self._blank = None
self._axes = list(data.shape)
self._axes.reverse()
# Update the header, including adding BZERO/BSCALE if new data is
# unsigned. Does not change the values of self._bitpix,
# self._orig_bitpix, etc.
self.update_header()
if data is not None and was_unsigned:
self._update_header_scale_info(data.dtype)
# Keep _orig_bitpix as it was until header update is done, then
# set it, to allow easier handling of the case of unsigned
# integer data being converted to something else. Setting these here
# is needed only for the case do_not_scale_image_data=True when
# setting the data to unsigned int.
# If necessary during initialization, i.e. if BSCALE and BZERO were
# not in the header but the data was unsigned, the attributes below
        # will be updated in __init__.
self._orig_bitpix = self._bitpix
self._orig_bscale = self._bscale
self._orig_bzero = self._bzero
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
def update_header(self):
"""
Update the header keywords to agree with the data.
"""
if not (
self._modified
or self._header._modified
or (self._has_data and self.shape != self.data.shape)
):
# Not likely that anything needs updating
return
old_naxis = self._header.get("NAXIS", 0)
if "BITPIX" not in self._header:
bitpix_comment = self.standard_keyword_comments["BITPIX"]
else:
bitpix_comment = self._header.comments["BITPIX"]
# Update the BITPIX keyword and ensure it's in the correct
# location in the header
self._header.set("BITPIX", self._bitpix, bitpix_comment, after=0)
# If the data's shape has changed (this may have happened without our
# noticing either via a direct update to the data.shape attribute) we
# need to update the internal self._axes
if self._has_data and self.shape != self.data.shape:
self._axes = list(self.data.shape)
self._axes.reverse()
# Update the NAXIS keyword and ensure it's in the correct location in
# the header
if "NAXIS" in self._header:
naxis_comment = self._header.comments["NAXIS"]
else:
naxis_comment = self.standard_keyword_comments["NAXIS"]
self._header.set("NAXIS", len(self._axes), naxis_comment, after="BITPIX")
# TODO: This routine is repeated in several different classes--it
# should probably be made available as a method on all standard HDU
# types
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
naxisn = "NAXIS" + str(idx + 1)
if naxisn in self._header:
self._header[naxisn] = axis
else:
if idx == 0:
after = "NAXIS"
else:
after = "NAXIS" + str(idx)
self._header.set(naxisn, axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header["NAXIS" + str(idx)]
except KeyError:
pass
if "BLANK" in self._header:
self._blank = self._header["BLANK"]
# Add BSCALE/BZERO to header if data is unsigned int.
self._update_pseudo_int_scale_keywords()
self._modified = False
def _update_header_scale_info(self, dtype=None):
"""
Delete BSCALE/BZERO from header if necessary.
"""
# Note that _dtype_for_bitpix determines the dtype based on the
# "original" values of bitpix, bscale, and bzero, stored in
# self._orig_bitpix, etc. It contains the logic for determining which
# special cases of BZERO/BSCALE, if any, are auto-detected as following
# the FITS unsigned int convention.
# Added original_was_unsigned with the intent of facilitating the
# special case of do_not_scale_image_data=True and uint=True
# eventually.
# FIXME: unused, maybe it should be useful?
# if self._dtype_for_bitpix() is not None:
# original_was_unsigned = self._dtype_for_bitpix().kind == 'u'
# else:
# original_was_unsigned = False
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1
):
return
if dtype is None:
dtype = self._dtype_for_bitpix()
if (
dtype is not None
and dtype.kind == "u"
and (self._scale_back or self._scale_back is None)
):
# Data is pseudo-unsigned integers, and the scale_back option
# was not explicitly set to False, so preserve all the scale
# factors
return
for keyword in ["BSCALE", "BZERO"]:
try:
del self._header[keyword]
# Since _update_header_scale_info can, currently, be called
# *after* _prewriteto(), replace these with blank cards so
# the header size doesn't change
self._header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self._header["BITPIX"] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self._header["BITPIX"]
self._blank = self._header.pop("BLANK", None)
def scale(self, type=None, option="old", bscale=None, bzero=None):
"""
Scale image data by using ``BSCALE``/``BZERO``.
Call to this method will scale `data` and update the keywords of
``BSCALE`` and ``BZERO`` in the HDU's header. This method should only
be used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy
dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'``
etc.). If is `None`, use the current data type.
option : str, optional
How to scale the data: ``"old"`` uses the original ``BSCALE`` and
``BZERO`` values from when the data was read/created (defaulting to
1 and 0 if they don't exist). For integer data only, ``"minmax"``
uses the minimum and maximum of the data to scale. User-specified
``bscale``/``bzero`` values always take precedence.
bscale, bzero : int, optional
User-specified ``BSCALE`` and ``BZERO`` values
"""
# Disable blank support for now
self._scale_internal(
type=type, option=option, bscale=bscale, bzero=bzero, blank=None
)
def _scale_internal(
self, type=None, option="old", bscale=None, bzero=None, blank=0
):
"""
This is an internal implementation of the `scale` method, which
also supports handling BLANK properly.
TODO: This is only needed for fixing #3865 without introducing any
public API changes. We should support BLANK better when rescaling
data, and when that is added the need for this internal interface
should go away.
Note: the default of ``blank=0`` merely reflects the current behavior,
and is not necessarily a deliberate choice (better would be to disallow
conversion of floats to ints without specifying a BLANK if there are
NaN/inf values).
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if bscale is not None and bzero is not None:
_scale = bscale
_zero = bzero
elif bscale is not None:
_scale = bscale
_zero = 0
elif bzero is not None:
_scale = 1
_zero = bzero
elif (
option == "old"
and self._orig_bscale is not None
and self._orig_bzero is not None
):
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == "minmax" and not issubclass(_type, np.floating):
if _is_dask_array(self.data):
min = self.data.min().compute()
max = self.data.max().compute()
else:
min = np.minimum.reduce(self.data.flat)
max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = min
_scale = (max - min) / (2.0**8 - 1)
else:
_zero = (max + min) / 2.0
# throw away -2^N
nbytes = 8 * _type().itemsize
_scale = (max - min) / (2.0**nbytes - 2)
else:
_scale = 1
_zero = 0
# Do the scaling
if _zero != 0:
if _is_dask_array(self.data):
self.data = self.data - _zero
else:
# 0.9.6.3 to avoid out of range error for BZERO = +32768
# We have to explicitly cast _zero to prevent numpy from raising an
# error when doing self.data -= zero, and we do this instead of
# self.data = self.data - zero to avoid doubling memory usage.
np.add(self.data, -_zero, out=self.data, casting="unsafe")
self._header["BZERO"] = _zero
else:
try:
del self._header["BZERO"]
except KeyError:
pass
if _scale and _scale != 1:
self.data = self.data / _scale
self._header["BSCALE"] = _scale
else:
try:
del self._header["BSCALE"]
except KeyError:
pass
# Set blanks
if blank is not None and issubclass(_type, np.integer):
# TODO: Perhaps check that the requested BLANK value fits in the
# integer type being scaled to?
self.data[np.isnan(self.data)] = blank
self._header["BLANK"] = blank
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type)
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self._header.get("BZERO", 0)
self._bscale = self._header.get("BSCALE", 1)
self._blank = blank
self._header["BITPIX"] = self._bitpix
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_blank = self._blank
def _verify(self, option="warn"):
# update_header can fix some things that would otherwise cause
# verification to fail, so do that now...
self.update_header()
self._verify_blank()
return super()._verify(option)
def _verify_blank(self):
# Probably not the best place for this (it should probably happen
# in _verify as well) but I want to be able to raise this warning
# both when the HDU is created and when written
if self._blank is None:
return
messages = []
        # TODO: Once the FITSSchema framework is merged these warnings
# should be handled by the schema
if not _is_int(self._blank):
messages.append(
"Invalid value for 'BLANK' keyword in header: {!r} "
"The 'BLANK' keyword must be an integer. It will be "
"ignored in the meantime.".format(self._blank)
)
self._blank = None
if not self._bitpix > 0:
messages.append(
"Invalid 'BLANK' keyword in header. The 'BLANK' keyword "
"is only applicable to integer data, and will be ignored "
"in this HDU."
)
self._blank = None
for msg in messages:
warnings.warn(msg, VerifyWarning)
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self._scale_internal(
BITPIX2DTYPE[self._orig_bitpix], blank=self._orig_blank
)
self.update_header()
if not inplace and self._data_needs_rescale:
# Go ahead and load the scaled image data and update the header
# with the correct post-rescaling headers
_ = self.data
return super()._prewriteto(checksum, inplace)
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
elif _is_dask_array(self.data):
return self._writeinternal_dask(fileobj)
else:
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f">i{self.data.dtype.itemsize}",
)
should_swap = False
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
if output.flags.writeable:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
fileobj.writearray(output.byteswap(False))
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _writeinternal_dask(self, fileobj):
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_integer(self.data.dtype):
raise NotImplementedError("This dtype isn't currently supported with dask.")
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = byteorder in swap_types
if should_swap:
from dask.utils import M
# NOTE: the inplace flag to byteswap needs to be False otherwise the array is
# byteswapped in place every time it is computed and this affects
# the input dask array.
output = output.map_blocks(M.byteswap, False).map_blocks(
M.newbyteorder, "S"
)
initial_position = fileobj.tell()
n_bytes = output.nbytes
# Extend the file n_bytes into the future
fileobj.seek(initial_position + n_bytes - 1)
fileobj.write(b"\0")
fileobj.flush()
if fileobj.fileobj_mode not in ("rb+", "wb+", "ab+"):
# Use another file handle if the current one is not in
# read/write mode
fp = open(fileobj.name, mode="rb+")
should_close = True
else:
fp = fileobj._file
should_close = False
try:
outmmap = mmap.mmap(
fp.fileno(), length=initial_position + n_bytes, access=mmap.ACCESS_WRITE
)
outarr = np.ndarray(
shape=output.shape,
dtype=output.dtype,
offset=initial_position,
buffer=outmmap,
)
output.store(outarr, lock=True, compute=True)
finally:
if should_close:
fp.close()
outmmap.close()
# On Windows closing the memmap causes the file pointer to return to 0, so
# we need to go back to the end of the data (since padding may be written
# after)
fileobj.seek(initial_position + n_bytes)
return n_bytes
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
if bitpix == 8 and self._orig_bzero == -128:
return np.dtype("int8")
for bits, dtype in (
(16, np.dtype("uint16")),
(32, np.dtype("uint32")),
(64, np.dtype("uint64")),
):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype("float64")
elif bitpix > 0: # scale integers to Float32
return np.dtype("float32")
def _convert_pseudo_integer(self, data):
"""
Handle "pseudo-unsigned" integers, if the user requested it. Returns
the converted data array if so; otherwise returns None.
        In this case, we don't need to handle BLANK to convert it to NaN,
since we can't do NaNs with integers, anyway, i.e. the user is
responsible for managing blanks.
"""
dtype = self._dtype_for_bitpix()
# bool(dtype) is always False--have to explicitly compare to None; this
# caused a fair amount of hair loss
if dtype is not None and dtype.kind == "u":
# Convert the input raw data into an unsigned integer array and
# then scale the data adjusting for the value of BZERO. Note that
# we subtract the value of BZERO instead of adding because of the
# way numpy converts the raw signed array into an unsigned array.
bits = dtype.itemsize * 8
data = np.array(data, dtype=dtype)
data -= np.uint64(1 << (bits - 1))
return data
def _get_scaled_image_data(self, offset, shape):
"""
Internal function for reading image data from a file and apply scale
factors to it. Normally this is used for the entire image, but it
supports alternate offset/shape for Section support.
"""
code = BITPIX2DTYPE[self._orig_bitpix]
raw_data = self._get_raw_data(shape, code, offset)
raw_data.dtype = raw_data.dtype.newbyteorder(">")
if self._do_not_scale_image_data or (
self._orig_bzero == 0 and self._orig_bscale == 1 and self._blank is None
):
# No further conversion of the data is necessary
return raw_data
try:
if self._file.strict_memmap:
raise ValueError(
"Cannot load a memory-mapped image: "
"BZERO/BSCALE/BLANK header keywords present. "
"Set memmap=False."
)
except AttributeError: # strict_memmap not set
pass
data = None
if not (self._orig_bzero == 0 and self._orig_bscale == 1):
data = self._convert_pseudo_integer(raw_data)
if data is None:
# In these cases, we end up with floating-point arrays and have to
# apply bscale and bzero. We may have to handle BLANK and convert
# to NaN in the resulting floating-point arrays.
# The BLANK keyword should only be applied for integer data (this
# is checked in __init__ but it can't hurt to double check here)
blanks = None
if self._blank is not None and self._bitpix > 0:
blanks = raw_data.flat == self._blank
# The size of blanks in bytes is the number of elements in
# raw_data.flat. However, if we use np.where instead we will
# only use 8 bytes for each index where the condition is true.
# So if the number of blank items is fewer than
# len(raw_data.flat) / 8, using np.where will use less memory
if blanks.sum() < len(blanks) / 8:
blanks = np.where(blanks)
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
data = np.array(raw_data, dtype=new_dtype)
else: # floating point cases
if self._file is not None and self._file.memmap:
data = raw_data.copy()
elif not raw_data.flags.writeable:
# create a writeable copy if needed
data = raw_data.copy()
# if not memmap, use the space already in memory
else:
data = raw_data
del raw_data
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
data += self._orig_bzero
if self._blank:
data.flat[blanks] = np.nan
return data
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
format = ""
else:
format = self.data.dtype.name
format = format[format.rfind(".") + 1 :]
else:
if self.shape and all(self.shape):
# Only show the format if all the dimensions are non-zero
# if data is not touched yet, use header info.
format = BITPIX2DTYPE[self._bitpix]
else:
format = ""
if (
format
and not self._do_not_scale_image_data
and (self._orig_bscale != 1 or self._orig_bzero != 0)
):
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
format += f" (rescales to {new_dtype.name})"
# Display shape in FITS-order
shape = tuple(reversed(self.shape))
return (self.name, self.ver, class_name, len(self._header), shape, format, "")
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
d = self.data
# First handle the special case where the data is unsigned integer
# 16, 32 or 64
if _is_pseudo_integer(self.data.dtype):
d = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f"i{self.data.dtype.itemsize}",
)
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
if d.dtype.str[0] != ">":
if d.flags.writeable:
byteswapped = True
d = d.byteswap(True)
d.dtype = d.dtype.newbyteorder(">")
else:
# If the data is not writeable, we just make a byteswapped
# copy and don't bother changing it back after
d = d.byteswap(False)
d.dtype = d.dtype.newbyteorder(">")
byteswapped = False
else:
byteswapped = False
cs = self._compute_checksum(d.flatten().view(np.uint8))
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped and not _is_pseudo_integer(self.data.dtype):
d.byteswap(True)
d.dtype = d.dtype.newbyteorder("<")
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
class Section:
"""
Class enabling subsets of ImageHDU data to be loaded lazily via slicing.
Slices of this object load the corresponding section of an image array from
the underlying FITS file, and applies any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`astropy:data-sections` section of the Astropy documentation
for more details.
"""
def __init__(self, hdu):
self.hdu = hdu
@property
def shape(self):
# Implementing `.shape` enables `astropy.nddata.Cutout2D` to accept
# `ImageHDU.section` in place of `.data`.
return self.hdu.shape
def __getitem__(self, key):
"""Returns a slice of HDU data specified by `key`.
If the image HDU is backed by a file handle, this method will only read
the chunks of the file needed to extract `key`, which is useful in
situations where the file is located on a slow or remote file system
(e.g., cloud storage).
"""
if not isinstance(key, tuple):
key = (key,)
naxis = len(self.hdu.shape)
return_scalar = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
if not any(k is Ellipsis for k in key):
# We can always add a ... at the end, after making note of whether
# to return a scalar.
key += (Ellipsis,)
ellipsis_count = len([k for k in key if k is Ellipsis])
if len(key) - ellipsis_count > naxis or ellipsis_count > 1:
raise IndexError("too many indices for array")
# Insert extra dimensions as needed.
idx = next(i for i, k in enumerate(key + (Ellipsis,)) if k is Ellipsis)
key = key[:idx] + (slice(None),) * (naxis - len(key) + 1) + key[idx + 1 :]
return_0dim = (
all(isinstance(k, (int, np.integer)) for k in key) and len(key) == naxis
)
dims = []
offset = 0
# Find all leading axes for which a single point is used.
for idx in range(naxis):
axis = self.hdu.shape[idx]
indx = _IndexInfo(key[idx], axis)
offset = offset * axis + indx.offset
if not _is_int(key[idx]):
dims.append(indx.npts)
break
is_contiguous = indx.contiguous
for jdx in range(idx + 1, naxis):
axis = self.hdu.shape[jdx]
indx = _IndexInfo(key[jdx], axis)
dims.append(indx.npts)
if indx.npts == axis and indx.contiguous:
# The offset needs to multiply the length of all remaining axes
offset *= axis
else:
is_contiguous = False
if is_contiguous:
dims = tuple(dims) or (1,)
bitpix = self.hdu._orig_bitpix
offset = self.hdu._data_offset + offset * abs(bitpix) // 8
# Note: the actual file read operations are delegated to
# `util._array_from_file` via `ImageHDU._get_scaled_image_data`
data = self.hdu._get_scaled_image_data(offset, dims)
else:
data = self._getdata(key)
if return_scalar:
data = data.item()
elif return_0dim:
data = data.squeeze()
return data
def _getdata(self, keys):
for idx, (key, axis) in enumerate(zip(keys, self.hdu.shape)):
if isinstance(key, slice):
ks = range(*key.indices(axis))
break
elif isiterable(key):
# Handle both integer and boolean arrays.
ks = np.arange(axis, dtype=int)[key]
break
# This should always break at some point if _getdata is called.
data = [self[keys[:idx] + (k,) + keys[idx + 1 :]] for k in ks]
if any(isinstance(key, slice) or isiterable(key) for key in keys[idx + 1 :]):
# data contains multidimensional arrays; combine them.
return np.array(data)
else:
# Only singleton dimensions remain; concatenate in a 1D array.
return np.concatenate([np.atleast_1d(array) for array in data])
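# Minimal usage sketch for Section (the file name is a placeholder): read a
# small cutout of a large image without loading the full array into memory.
#
#   from astropy.io import fits
#   with fits.open("large_image.fits") as hdul:
#       cutout = hdul[0].section[100:200, 250:300]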
class PrimaryHDU(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = "PRIMARY"
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
ignore_blank=False,
uint=True,
scale_back=None,
):
"""
Construct a primary HDU.
Parameters
----------
data : array or ``astropy.io.fits.hdu.base.DELAYED``, optional
The data in the HDU.
header : `~astropy.io.fits.Header`, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
ignore_blank : bool, optional
If `True`, the BLANK header keyword will be ignored if present.
Otherwise, pixels equal to this value will be replaced with
NaNs. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
"""
super().__init__(
data=data,
header=header,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
ignore_blank=ignore_blank,
scale_back=scale_back,
)
# insert the keywords EXTEND
if header is None:
dim = self._header["NAXIS"]
if dim == 0:
dim = ""
self._header.set("EXTEND", True, after="NAXIS" + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
# Due to problems discussed in #5808, we cannot assume the 'GROUPS'
# keyword to be True/False, have to check the value
return (
card.keyword == "SIMPLE"
and ("GROUPS" not in header or header["GROUPS"] != True) # noqa: E712
and card.value
)
def update_header(self):
super().update_header()
# Update the position of the EXTEND keyword if it already exists
if "EXTEND" in self._header:
if len(self._axes):
after = "NAXIS" + str(len(self._axes))
else:
after = "NAXIS"
self._header.set("EXTEND", after=after)
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword check that its position is correct
if "EXTEND" in self._header:
naxis = self._header.get("NAXIS", 0)
self.req_cards(
"EXTEND", naxis + 3, lambda v: isinstance(v, bool), True, option, errs
)
return errs
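# Minimal usage sketch for PrimaryHDU (the output path is a placeholder):
# build a primary HDU from an array and write it to a new FITS file.
#
#   import numpy as np
#   from astropy.io import fits
#   hdu = fits.PrimaryHDU(data=np.zeros((10, 10), dtype=np.float32))
#   hdu.writeto("example.fits", overwrite=True)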
class ImageHDU(_ImageBaseHDU, ExtensionHDU):
"""
FITS image extension HDU class.
"""
_extension = "IMAGE"
def __init__(
self,
data=None,
header=None,
name=None,
do_not_scale_image_data=False,
uint=True,
scale_back=None,
ver=None,
):
"""
Construct an image HDU.
Parameters
----------
data : array
The data in the HDU.
header : `~astropy.io.fits.Header`
The header to be used (as a template). If ``header`` is
`None`, a minimal header will be provided.
name : str, optional
The name of the HDU, will be the value of the keyword
``EXTNAME``.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
ver : int > 0 or None, optional
The ver of the HDU, will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
"""
# This __init__ currently does nothing differently from the base class,
# and is only explicitly defined for the docstring.
super().__init__(
data=data,
header=header,
name=name,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
scale_back=scale_back,
ver=ver,
)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == "XTENSION" and xtension == cls._extension
def _verify(self, option="warn"):
"""
ImageHDU verify method.
"""
errs = super()._verify(option=option)
naxis = self._header.get("NAXIS", 0)
# PCOUNT must == 0, GCOUNT must == 1; the former is verified in
# ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT
# to be >= 0, so we need to check it here
self.req_cards(
"PCOUNT", naxis + 3, lambda v: (_is_int(v) and v == 0), 0, option, errs
)
return errs
class _IndexInfo:
def __init__(self, indx, naxis):
if _is_int(indx):
if indx < 0: # support negative indexing
indx = indx + naxis
if 0 <= indx < naxis:
self.npts = 1
self.offset = indx
self.contiguous = True
else:
raise IndexError(f"Index {indx} out of range.")
elif isinstance(indx, slice):
start, stop, step = indx.indices(naxis)
self.npts = (stop - start) // step
self.offset = start
self.contiguous = step == 1
elif isiterable(indx):
self.npts = len(indx)
self.offset = 0
self.contiguous = False
else:
raise IndexError(f"Illegal index {indx}")
|
{
"content_hash": "0f062bae185b67d6429e5e62821d045c",
"timestamp": "",
"source": "github",
"line_count": 1308,
"max_line_length": 89,
"avg_line_length": 36.85703363914373,
"alnum_prop": 0.5550001037150739,
"repo_name": "pllim/astropy",
"id": "dfaa4273371d96a0fd43ce874d44811353b8a4f5",
"size": "48273",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/io/fits/hdu/image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import unittest
import re
import pytest
import fiona
from .conftest import WGS84PATTERN
@pytest.mark.usefixtures("unittest_path_coutwildrnp_shp")
class ReadingTest(unittest.TestCase):
def setUp(self):
self.c = fiona.open(self.path_coutwildrnp_shp, "r")
def tearDown(self):
self.c.close()
def test_open_repr(self):
assert (
repr(self.c) ==
("<open Collection '{path}:coutwildrnp', mode 'r' "
"at {hexid}>".format(hexid=hex(id(self.c)), path=self.path_coutwildrnp_shp)))
def test_closed_repr(self):
self.c.close()
assert (
repr(self.c) ==
("<closed Collection '{path}:coutwildrnp', mode 'r' "
"at {hexid}>".format(hexid=hex(id(self.c)), path=self.path_coutwildrnp_shp)))
def test_path(self):
assert self.c.path == self.path_coutwildrnp_shp
def test_name(self):
assert self.c.name == 'coutwildrnp'
def test_mode(self):
assert self.c.mode == 'r'
def test_encoding(self):
assert self.c.encoding is None
def test_iter(self):
assert iter(self.c)
def test_closed_no_iter(self):
self.c.close()
with pytest.raises(ValueError):
iter(self.c)
def test_len(self):
assert len(self.c) == 67
def test_closed_len(self):
# Len is lazy, it's never computed in this case. TODO?
self.c.close()
assert len(self.c) == 0
def test_len_closed_len(self):
# Lazy len is computed in this case and sticks.
len(self.c)
self.c.close()
assert len(self.c) == 67
def test_driver(self):
assert self.c.driver == "ESRI Shapefile"
def test_closed_driver(self):
self.c.close()
assert self.c.driver is None
def test_driver_closed_driver(self):
self.c.driver
self.c.close()
assert self.c.driver == "ESRI Shapefile"
def test_schema(self):
s = self.c.schema['properties']
assert s['PERIMETER'] == "float:24.15"
assert s['NAME'] == "str:80"
assert s['URL'] == "str:101"
assert s['STATE_FIPS'] == "str:80"
assert s['WILDRNP020'] == "int:10"
def test_closed_schema(self):
# Schema is lazy too, never computed in this case. TODO?
self.c.close()
assert self.c.schema is None
def test_schema_closed_schema(self):
self.c.schema
self.c.close()
assert sorted(self.c.schema.keys()) == ['geometry', 'properties']
def test_crs(self):
crs = self.c.crs
assert crs['init'] == 'epsg:4326'
def test_crs_wkt(self):
crs = self.c.crs_wkt
assert re.match(WGS84PATTERN, crs)
def test_closed_crs(self):
# Crs is lazy too, never computed in this case. TODO?
self.c.close()
assert self.c.crs is None
def test_crs_closed_crs(self):
self.c.crs
self.c.close()
assert sorted(self.c.crs.keys()) == ['init']
def test_meta(self):
assert (sorted(self.c.meta.keys()) ==
['crs', 'crs_wkt', 'driver', 'schema'])
def test_profile(self):
assert (sorted(self.c.profile.keys()) ==
['crs', 'crs_wkt', 'driver', 'schema'])
def test_bounds(self):
assert self.c.bounds[0] == pytest.approx(-113.564247)
assert self.c.bounds[1] == pytest.approx(37.068981)
assert self.c.bounds[2] == pytest.approx(-104.970871)
assert self.c.bounds[3] == pytest.approx(41.996277)
def test_context(self):
with fiona.open(self.path_coutwildrnp_shp, "r") as c:
assert c.name == 'coutwildrnp'
assert len(c) == 67
assert c.closed
def test_iter_one(self):
itr = iter(self.c)
f = next(itr)
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_iter_list(self):
f = list(self.c)[0]
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_re_iter_list(self):
f = list(self.c)[0] # Run through iterator
f = list(self.c)[0] # Run through a new, reset iterator
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_getitem_one(self):
f = self.c[0]
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_getitem_iter_combo(self):
i = iter(self.c)
f = next(i)
f = next(i)
assert f['id'] == "1"
f = self.c[0]
assert f['id'] == "0"
f = next(i)
assert f['id'] == "2"
def test_no_write(self):
with pytest.raises(OSError):
self.c.write({})
def test_iter_items_list(self):
i, f = list(self.c.items())[0]
assert i == 0
assert f['id'] == "0"
assert f['properties']['STATE'] == 'UT'
def test_iter_keys_list(self):
i = list(self.c.keys())[0]
assert i == 0
def test_in_keys(self):
assert 0 in self.c.keys()
assert 0 in self.c
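# Usage sketch (standalone, not part of the test class above): the same reading
# API these tests exercise. The shapefile path is a hypothetical placeholder;
# substitute a dataset of your own.
import fiona

with fiona.open("tests/data/coutwildrnp.shp") as collection:
    print(collection.driver, collection.crs, len(collection))
    for feature in collection:
        print(feature["id"], feature["properties"]["NAME"])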
|
{
"content_hash": "71d48cf7d615e0545e542d5bda66b30c",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 90,
"avg_line_length": 27.83606557377049,
"alnum_prop": 0.5459363957597173,
"repo_name": "Toblerity/Fiona",
"id": "5fc567b8b86ab28e90e51a5793c5c0d4a2b76f11",
"size": "5132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_collection_legacy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Cython",
"bytes": "215771"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "456515"
},
{
"name": "Shell",
"bytes": "4572"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'psi'
copyright = u'2013, Renato de Pontes Pereira'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'psidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'psi.tex', u'psi Documentation',
u'Renato de Pontes Pereira', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'psi', u'psi Documentation',
[u'Renato de Pontes Pereira'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'psi', u'psi Documentation',
u'Renato de Pontes Pereira', 'psi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
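# Build sketch (not part of the Sphinx configuration itself): one way to produce
# the HTML output this conf.py drives. It assumes ``sphinx-build`` is on PATH and
# that the usual source/build layout is used; adjust the paths as needed.
import subprocess

subprocess.run(
    ["sphinx-build", "-b", "html", "docs/source", "docs/build/html"],
    check=True,  # raise CalledProcessError if the build fails
)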
|
{
"content_hash": "0f0b49b9a0b2abc2aa4dc0bf221d6c13",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 80,
"avg_line_length": 32.14468085106383,
"alnum_prop": 0.7017474185861795,
"repo_name": "renatopp/psi-robotics",
"id": "34832bc4ca3cc0c5fab1a8742bc30e704fa2f4c0",
"size": "7968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "245314"
},
{
"name": "Shell",
"bytes": "6462"
}
],
"symlink_target": ""
}
|
import os
import platform
import re
import pytest
from conans.client.tools.oss import detected_os
from conans.model.info import ConanInfo
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import CONANFILE_TXT, CONANINFO
from conans.test.utils.tools import TestClient, GenConanfile
from conans.util.files import save
@pytest.fixture()
def client():
c = TestClient()
save(c.cache.settings_path, "os: [Windows, Macos, Linux, FreeBSD]\nos_build: [Windows, Macos]\narch_build: [x86_64]")
save(c.cache.default_profile_path, "[settings]\nos=Windows")
def base_conanfile(name):
return GenConanfile(name, "0.1").with_option("language", [0, 1])\
.with_default_option("language", 0).with_settings("os")
c.save({"conanfile.py": base_conanfile("Hello0")})
c.run("export . lasote/stable")
c.save({"conanfile.py": base_conanfile("Hello1").with_requires("Hello0/0.1@lasote/stable")})
c.run("export . lasote/stable")
c.save({"conanfile.py": base_conanfile("Hello2").with_requires("Hello1/0.1@lasote/stable")})
c.run("export . lasote/stable")
return c
def test_install_combined(client):
client.run("install . --build=missing")
client.run("install . --build=missing --build Hello1")
assert "Hello0/0.1@lasote/stable: Already installed!" in client.out
assert "Hello1/0.1@lasote/stable: Forced build from source" in client.out
def test_install_transitive_cache(client):
client.run("install Hello2/0.1@lasote/stable --build=missing")
assert "Hello0/0.1@lasote/stable: Generating the package" in client.out
assert "Hello1/0.1@lasote/stable: Generating the package" in client.out
assert "Hello2/0.1@lasote/stable: Generating the package" in client.out
def test_partials(client):
client.run("install . --build=missing")
client.run("install ./ --build=Bye")
assert "No package matching 'Bye' pattern" in client.out
for package in ["Hello0", "Hello1"]:
client.run("install . --build=%s" % package)
assert "No package matching" not in client.out
def test_reuse(client):
for lang, id0, id1 in [(0, "3475bd55b91ae904ac96fde0f106a136ab951a5e",
"5faecfb46fd09e49f1812d732d6360bc1663e3ab"),
(1, "f43bd822487baa4ed2426c279c27b2811870499a",
"b96337c5fcdafd6533298017c2ba94812654f8ec")]:
client.run("install . -o *:language=%d --build missing" % lang)
assert "Configuration:[settings]", "".join(str(client.out).splitlines())
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
assert "os=Windows" == conan_info.settings.dumps()
assert "language=%s" % lang, conan_info.options.dumps()
ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
hello0 = client.cache.package_layout(ref).package(PackageReference(ref, id0))
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
assert lang == hello0_conan_info.options.language
pref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"), id1)
hello1 = client.cache.package_layout(pref1.ref).package(pref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
assert lang == hello1_conan_info.options.language
def test_upper_option(client):
client.run("install conanfile.py -o Hello2:language=1 -o Hello1:language=0 "
"-o Hello0:language=1 --build missing")
package_id = re.search(r"Hello0/0.1@lasote/stable:(\S+)", str(client.out)).group(1)
package_id2 = re.search(r"Hello1/0.1@lasote/stable:(\S+)", str(client.out)).group(1)
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
assert "language=1" == conan_info.options.dumps()
ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
pref = PackageReference(ref, package_id)
hello0 = client.cache.package_layout(ref).package(pref)
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
assert 1 == hello0_conan_info.options.language
pref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"), package_id2)
hello1 = client.cache.package_layout(pref1.ref).package(pref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
assert 0 == hello1_conan_info.options.language
def test_inverse_upper_option(client):
client.run("install . -o language=0 -o Hello1:language=1 -o Hello0:language=0 --build missing")
package_id = re.search(r"Hello0/0.1@lasote/stable:(\S+)", str(client.out)).group(1)
package_id2 = re.search(r"Hello1/0.1@lasote/stable:(\S+)", str(client.out)).group(1)
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
assert "language=0" == conan_info.options.dumps()
ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
pref = PackageReference(ref, package_id)
hello0 = client.cache.package_layout(ref).package(pref)
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
assert "language=0" == hello0_conan_info.options.dumps()
pref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"), package_id2)
hello1 = client.cache.package_layout(pref1.ref).package(pref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
assert "language=1" == hello1_conan_info.options.dumps()
def test_upper_option_txt(client):
files = {CONANFILE_TXT: """[requires]
Hello1/0.1@lasote/stable
[options]
Hello0:language=1
Hello1:language=0
"""}
client.save(files, clean_first=True)
client.run("install . --build missing")
package_id = re.search(r"Hello0/0.1@lasote/stable:(\S+)", str(client.out)).group(1)
package_id2 = re.search(r"Hello1/0.1@lasote/stable:(\S+)", str(client.out)).group(1)
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
assert "" == conan_info.options.dumps()
ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
pref = PackageReference(ref, package_id)
hello0 = client.cache.package_layout(ref).package(pref)
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
assert 1 == hello0_conan_info.options.language
pref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"), package_id2)
hello1 = client.cache.package_layout(pref1.ref).package(pref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
assert 0 == hello1_conan_info.options.language
def test_change_option_txt(client):
# Do not adjust cpu_count, it is reusing a cache
client = TestClient(cache_folder=client.cache_folder, cpu_count=False)
files = {CONANFILE_TXT: """[requires]
Hello0/0.1@lasote/stable
[options]
Hello0:language=1
"""}
client.save(files)
client.run("install conanfile.txt --build missing")
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
assert "" == conan_info.options.dumps()
assert "Hello0:language=1" == conan_info.full_options.dumps()
assert "Hello0/0.1@lasote/stable:f43bd822487baa4ed2426c279c27b2811870499a" ==\
conan_info.full_requires.dumps()
files = {CONANFILE_TXT: """[requires]
Hello0/0.1@lasote/stable
[options]
Hello0:language=0
"""}
client.save(files)
client.run("install . --build missing")
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
assert "" == conan_info.options.dumps()
# For conan install options are not cached anymore
assert "Hello0:language=0" == conan_info.full_options.dumps()
# it is necessary to clean the cached conaninfo
client.save(files, clean_first=True)
client.run("install ./conanfile.txt --build missing")
conan_info = ConanInfo.load_file(info_path)
assert "" == conan_info.options.dumps()
assert "Hello0:language=0" == conan_info.full_options.dumps()
assert "Hello0/0.1@lasote/stable:3475bd55b91ae904ac96fde0f106a136ab951a5e" \
== conan_info.full_requires.dumps()
def test_cross_platform_msg(client):
# Explicit with os_build and os_arch settings
client.run("install Hello0/0.1@lasote/stable -s os_build=Macos -s arch_build=x86_64 -s os=Windows", assert_error=True)
assert "Cross-build from 'Macos:x86_64' to 'Windows:None'" in client.out
assert "ERROR: Missing binary: Hello0" in client.out
bad_os = "Windows" if platform.system() != "Windows" else "Macos"
client.run("install Hello0/0.1@lasote/stable -s os={} -s arch_build=x86_64".format(bad_os), assert_error=True)
# Implicit detection when not available (retrocompatibility)
message = "Cross-build from '{}:x86_64' to '{}:None'".format(detected_os(), bad_os)
assert message in client.out
|
{
"content_hash": "59ce97fa3ffdf5f55d0e2377d51a8e1f",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 122,
"avg_line_length": 43.61574074074074,
"alnum_prop": 0.6868697590489332,
"repo_name": "conan-io/conan",
"id": "442a58a5ca45f70949aac0aee7e8e2ff32907816",
"size": "9421",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/integration/command/install/test_install_transitive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
}
|
import functools
import logging
import os
import sys
import traceback
CURRENT_SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
LOG_FORMAT = ('[%(asctime)s PID %(process)s '
'%(filename)s:%(lineno)s - %(funcName)s()] '
'%(levelname)s -> \n'
'%(message)s\n')
# Configure the logging both to file and to console. Works from python 3.3+
logging.basicConfig(
format=LOG_FORMAT,
level=logging.INFO,
handlers=[
logging.FileHandler(f'{CURRENT_SCRIPT_NAME}.log'),
logging.StreamHandler(sys.stdout)
])
def catch_exceptions(func):
'''
A decorator to automatically catch exceptions. Can be extended to use,
e.g., sentry/raven to provide a context and more detailed traceback info.
'''
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
        except Exception:
            logging.error(f'Exception occurred: {traceback.format_exc()}, context={locals()}')
return wrapper
class Worker(object):
def __init__(self, param1):
self.__private_var = ""
self.param1 = param1
@catch_exceptions
def run(self):
logging.info(f"The value of param1 is: '{self.param1}'")
logging.info(f"I ran. Woohoo!")
raise Exception('Forcing an exception here to test the decorator.')
if __name__ == "__main__":
worker = Worker(param1=12.76523)
worker.run()
|
{
"content_hash": "fd295832b7133779434805f6060811a8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 93,
"avg_line_length": 30.346938775510203,
"alnum_prop": 0.5965030262273033,
"repo_name": "tiagoprn/experiments",
"id": "5f62f3d75d725d103e36b7590f7ea1926d8e08cc",
"size": "1487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "micro/bootstrapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6285"
},
{
"name": "Dockerfile",
"bytes": "1625"
},
{
"name": "HTML",
"bytes": "66184"
},
{
"name": "JavaScript",
"bytes": "56617"
},
{
"name": "Makefile",
"bytes": "10847"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "Python",
"bytes": "273134"
},
{
"name": "Shell",
"bytes": "9257"
}
],
"symlink_target": ""
}
|
import types
import inspect
import functools
from pytoad import Connection
from groundhogday import GroundhogDayClass
class AirbrakeNotifier(object):
def __init__(self, **kwargs):
self.environment = kwargs.get('environment', 'production')
self.additional_information = kwargs['additional_information']
self.pytoad_connection = Connection(**kwargs)
def __call__(self, e):
if not self.environment == 'test':
self.pytoad_connection.send_to_hoptoad(e, self.additional_information)
else:
print 'Would send (%s %s) to airbrake' % (e, self.additional_information)
def _get_additional_information(f):
try:
sourcelines = inspect.getsourcelines(f)
return "method=%s file=%s line=%s" % (f.__name__, inspect.getfile(f).split('/')[-1],sourcelines[1])
except:
return None
def RetryWithAirbrake(*args, **kwargs):
    if len(args) and type(args[0]) == types.FunctionType:  # We were called without parentheses.
functools.wraps(args[0])
return GroundhogDayClass(args[0], notification_callback=AirbrakeNotifier(additional_information=_get_additional_information(args[0])))()
else:
def wrap(f):
functools.wraps(f)
kwargs['additional_information'] = _get_additional_information(f)
kwargs['notification_callback'] = AirbrakeNotifier(**kwargs)
return GroundhogDayClass(f, **kwargs)()
return wrap
|
{
"content_hash": "3cb430bf976c889ba25ee3f8e4b87918",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 144,
"avg_line_length": 39.78378378378378,
"alnum_prop": 0.6569293478260869,
"repo_name": "bcoe/groundhogday",
"id": "751b770b9f0820dbbf92254d988b8be3ecf9e440",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groundhogday/retry_with_airbrake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12076"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
class NvpApiClient(object):
    '''An abstract base class for all NvpApiClient implementations.
This defines the interface and property structure for synchronous and
coroutine-based classes.
'''
__metaclass__ = ABCMeta
CONN_IDLE_TIMEOUT = 60 * 15
@abstractmethod
def update_providers(self, api_providers):
pass
@abstractproperty
def user(self):
pass
@abstractproperty
def password(self):
pass
@abstractproperty
def auth_cookie(self):
pass
@abstractmethod
def acquire_connection(self):
pass
@abstractmethod
def release_connection(self, http_conn, bad_state=False):
pass
@abstractproperty
def need_login(self):
pass
@abstractmethod
def wait_for_login(self):
pass
@abstractmethod
def login(self):
pass
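# Sketch of a concrete subclass (illustrative only, not taken from the plugin):
# every abstract method and property declared above must be overridden before the
# class can be instantiated; the bodies below are placeholders, not real NVP logic.
class DummyNvpApiClient(NvpApiClient):

    def update_providers(self, api_providers):
        self._api_providers = list(api_providers)

    @property
    def user(self):
        return "admin"

    @property
    def password(self):
        return "secret"

    @property
    def auth_cookie(self):
        return None

    def acquire_connection(self):
        return object()  # placeholder for a pooled HTTP connection

    def release_connection(self, http_conn, bad_state=False):
        pass

    @property
    def need_login(self):
        return False

    def wait_for_login(self):
        pass

    def login(self):
        pass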
|
{
"content_hash": "63d797405b7a9d3f6adfc351ec12951a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 73,
"avg_line_length": 18.901960784313726,
"alnum_prop": 0.6535269709543569,
"repo_name": "aristanetworks/arista-ovs-quantum",
"id": "e3cc9d1d0d86f3515382fcc26cd99e84135e5f4a",
"size": "1722",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quantum/plugins/nicira/nicira_nvp_plugin/api_client/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "2568389"
},
{
"name": "Scala",
"bytes": "4525"
},
{
"name": "Shell",
"bytes": "7843"
},
{
"name": "XML",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.contrib.telnet.application import TelnetApplication
from prompt_toolkit.contrib.telnet.server import TelnetServer
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.application import AbortAction
from pygments.lexers import HtmlLexer
import logging
# Set up logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
class ExampleApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# When a client is connected, erase the screen from the client and say
# Hello.
telnet_connection.erase_screen()
telnet_connection.send('Welcome!\n')
# Set CommandLineInterface.
animal_completer = WordCompleter(['alligator', 'ant'])
telnet_connection.set_application(
create_prompt_application(message='Say something: ',
lexer=HtmlLexer,
completer=animal_completer,
on_abort=AbortAction.RETRY),
self.handle_command)
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
if document.text == 'exit':
telnet_connection.close()
else:
telnet_connection.send('You said: %s\n\n' % document.text)
def client_leaving(self, telnet_connection):
# Say 'bye' when the client quits.
telnet_connection.send('Bye.\n')
if __name__ == '__main__':
TelnetServer(application=ExampleApplication(), port=2323).run()
|
{
"content_hash": "a59abbcf8b4f5091153c70f97ce4cffd",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 36.212765957446805,
"alnum_prop": 0.663924794359577,
"repo_name": "melund/python-prompt-toolkit",
"id": "60fdf5cf7f4c32469d57c9e75e510b7ad4118a44",
"size": "1724",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/telnet.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "860038"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_firewall
description:
- Each network has its own firewall controlling access to and from the instances.
- All traffic to instances, even from other instances, is blocked by the firewall
unless firewall rules are created to allow it.
- The default network has automatically created firewall rules that are shown in default
firewall rules. No manually created network has automatically created firewall rules
except for a default "allow" rule for outgoing traffic and a default "deny" for
incoming traffic. For all networks except the default network, you must create any
firewall rules you need.
short_description: Creates a GCP Firewall
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
allowed:
description:
- The list of ALLOW rules specified by this firewall. Each rule specifies a protocol
and port-range tuple that describes a permitted connection.
required: false
suboptions:
ip_protocol:
description:
- The IP protocol to which this rule applies. The protocol type is required
when creating a firewall rule. This value can either be one of the following
well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol
number.
required: true
ports:
description:
- An optional list of ports to which this rule applies. This field is only
applicable for UDP or TCP protocol. Each entry must be either an integer
or a range. If not specified, this rule applies to connections through any
port.
- 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].'
required: false
denied:
description:
- The list of DENY rules specified by this firewall. Each rule specifies a protocol
and port-range tuple that describes a denied connection.
required: false
version_added: 2.8
suboptions:
ip_protocol:
description:
- The IP protocol to which this rule applies. The protocol type is required
when creating a firewall rule. This value can either be one of the following
well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol
number.
required: true
ports:
description:
- An optional list of ports to which this rule applies. This field is only
applicable for UDP or TCP protocol. Each entry must be either an integer
or a range. If not specified, this rule applies to connections through any
port.
- 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].'
required: false
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
destination_ranges:
description:
- If destination ranges are specified, the firewall will apply only to traffic
that has destination IP address in these ranges. These ranges must be expressed
in CIDR format. Only IPv4 is supported.
required: false
version_added: 2.8
direction:
description:
- 'Direction of traffic to which this firewall applies; default is INGRESS. Note:
For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS
traffic, it is NOT supported to specify sourceRanges OR sourceTags.'
required: false
version_added: 2.8
choices:
- INGRESS
- EGRESS
disabled:
description:
- Denotes whether the firewall rule is disabled, i.e not applied to the network
it is associated with. When set to true, the firewall rule is not enforced and
the network behaves as if it did not exist. If this is unspecified, the firewall
rule will be enabled.
required: false
type: bool
version_added: 2.8
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
network:
description:
- 'URL of the network resource for this firewall rule. If not specified when creating
a firewall rule, the default network is used: global/networks/default If you
choose to specify this property, you can specify the network as a full or partial
URL. For example, the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/myproject/global/)
networks/my-network projects/myproject/global/networks/my-network global/networks/default
.'
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_network task and then set this network field to "{{ name-of-resource
}}"'
required: false
default:
selfLink: global/networks/default
priority:
description:
- Priority for this rule. This is an integer between 0 and 65535, both inclusive.
When not specified, the value assumed is 1000. Relative priorities determine
precedence of conflicting rules. Lower value of priority implies higher precedence
(eg, a rule with priority 0 has higher precedence than a rule with priority
1). DENY rules take precedence over ALLOW rules having equal priority.
required: false
default: '1000'
version_added: 2.8
source_ranges:
description:
- If source ranges are specified, the firewall will apply only to traffic that
has source IP address in these ranges. These ranges must be expressed in CIDR
format. One or both of sourceRanges and sourceTags may be set. If both properties
are set, the firewall will apply to traffic that has source IP address within
sourceRanges OR the source IP that belongs to a tag listed in the sourceTags
property. The connection does not need to match both properties for the firewall
to apply. Only IPv4 is supported.
required: false
source_service_accounts:
description:
- If source service accounts are specified, the firewall will apply only to traffic
originating from an instance with a service account in this list. Source service
accounts cannot be used to control traffic to an instance's external IP address
because service accounts are associated with an instance, not an IP address.
sourceRanges can be set at the same time as sourceServiceAccounts. If both are
set, the firewall will apply to traffic that has source IP address within sourceRanges
OR the source IP belongs to an instance with service account listed in sourceServiceAccount.
The connection does not need to match both properties for the firewall to apply.
sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.
required: false
version_added: 2.8
source_tags:
description:
- If source tags are specified, the firewall will apply only to traffic with source
IP that belongs to a tag listed in source tags. Source tags cannot be used to
control traffic to an instance's external IP address. Because tags are associated
with an instance, not an IP address. One or both of sourceRanges and sourceTags
may be set. If both properties are set, the firewall will apply to traffic that
has source IP address within sourceRanges OR the source IP that belongs to a
tag listed in the sourceTags property. The connection does not need to match
both properties for the firewall to apply.
required: false
target_service_accounts:
description:
- A list of service accounts indicating sets of instances located in the network
that may make network connections as specified in allowed[].
- targetServiceAccounts cannot be used at the same time as targetTags or sourceTags.
If neither targetServiceAccounts nor targetTags are specified, the firewall
rule applies to all instances on the specified network.
required: false
version_added: 2.8
target_tags:
description:
- A list of instance tags indicating sets of instances located in the network
that may make network connections as specified in allowed[].
- If no targetTags are specified, the firewall rule applies to all instances on
the specified network.
required: false
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/firewalls)'
- 'Official Documentation: U(https://cloud.google.com/vpc/docs/firewalls)'
'''
EXAMPLES = '''
- name: create a firewall
gcp_compute_firewall:
name: test_object
allowed:
- ip_protocol: tcp
ports:
- '22'
target_tags:
- test-ssh-server
- staging-ssh-server
source_tags:
- test-ssh-clients
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
allowed:
description:
- The list of ALLOW rules specified by this firewall. Each rule specifies a protocol
and port-range tuple that describes a permitted connection.
returned: success
type: complex
contains:
ip_protocol:
description:
- The IP protocol to which this rule applies. The protocol type is required
when creating a firewall rule. This value can either be one of the following
well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol
number.
returned: success
type: str
ports:
description:
- An optional list of ports to which this rule applies. This field is only applicable
for UDP or TCP protocol. Each entry must be either an integer or a range.
If not specified, this rule applies to connections through any port.
- 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].'
returned: success
type: list
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
denied:
description:
- The list of DENY rules specified by this firewall. Each rule specifies a protocol
and port-range tuple that describes a denied connection.
returned: success
type: complex
contains:
ip_protocol:
description:
- The IP protocol to which this rule applies. The protocol type is required
when creating a firewall rule. This value can either be one of the following
well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol
number.
returned: success
type: str
ports:
description:
- An optional list of ports to which this rule applies. This field is only applicable
for UDP or TCP protocol. Each entry must be either an integer or a range.
If not specified, this rule applies to connections through any port.
- 'Example inputs include: ["22"], ["80","443"], and ["12345-12349"].'
returned: success
type: list
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
destinationRanges:
description:
- If destination ranges are specified, the firewall will apply only to traffic that
has destination IP address in these ranges. These ranges must be expressed in
CIDR format. Only IPv4 is supported.
returned: success
type: list
direction:
description:
- 'Direction of traffic to which this firewall applies; default is INGRESS. Note:
For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS
traffic, it is NOT supported to specify sourceRanges OR sourceTags.'
returned: success
type: str
disabled:
description:
- Denotes whether the firewall rule is disabled, i.e not applied to the network
it is associated with. When set to true, the firewall rule is not enforced and
the network behaves as if it did not exist. If this is unspecified, the firewall
rule will be enabled.
returned: success
type: bool
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
network:
description:
- 'URL of the network resource for this firewall rule. If not specified when creating
a firewall rule, the default network is used: global/networks/default If you choose
to specify this property, you can specify the network as a full or partial URL.
For example, the following are all valid URLs: U(https://www.googleapis.com/compute/v1/projects/myproject/global/)
networks/my-network projects/myproject/global/networks/my-network global/networks/default
.'
returned: success
type: dict
priority:
description:
- Priority for this rule. This is an integer between 0 and 65535, both inclusive.
When not specified, the value assumed is 1000. Relative priorities determine precedence
of conflicting rules. Lower value of priority implies higher precedence (eg, a
rule with priority 0 has higher precedence than a rule with priority 1). DENY
rules take precedence over ALLOW rules having equal priority.
returned: success
type: int
sourceRanges:
description:
- If source ranges are specified, the firewall will apply only to traffic that has
source IP address in these ranges. These ranges must be expressed in CIDR format.
One or both of sourceRanges and sourceTags may be set. If both properties are
set, the firewall will apply to traffic that has source IP address within sourceRanges
OR the source IP that belongs to a tag listed in the sourceTags property. The
connection does not need to match both properties for the firewall to apply. Only
IPv4 is supported.
returned: success
type: list
sourceServiceAccounts:
description:
- If source service accounts are specified, the firewall will apply only to traffic
originating from an instance with a service account in this list. Source service
accounts cannot be used to control traffic to an instance's external IP address
because service accounts are associated with an instance, not an IP address. sourceRanges
can be set at the same time as sourceServiceAccounts. If both are set, the firewall
will apply to traffic that has source IP address within sourceRanges OR the source
IP belongs to an instance with service account listed in sourceServiceAccount.
The connection does not need to match both properties for the firewall to apply.
sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.
returned: success
type: list
sourceTags:
description:
- If source tags are specified, the firewall will apply only to traffic with source
IP that belongs to a tag listed in source tags. Source tags cannot be used to
control traffic to an instance's external IP address. Because tags are associated
with an instance, not an IP address. One or both of sourceRanges and sourceTags
may be set. If both properties are set, the firewall will apply to traffic that
has source IP address within sourceRanges OR the source IP that belongs to a tag
listed in the sourceTags property. The connection does not need to match both
properties for the firewall to apply.
returned: success
type: list
targetServiceAccounts:
description:
- A list of service accounts indicating sets of instances located in the network
that may make network connections as specified in allowed[].
- targetServiceAccounts cannot be used at the same time as targetTags or sourceTags.
If neither targetServiceAccounts nor targetTags are specified, the firewall rule
applies to all instances on the specified network.
returned: success
type: list
targetTags:
description:
- A list of instance tags indicating sets of instances located in the network that
may make network connections as specified in allowed[].
- If no targetTags are specified, the firewall rule applies to all instances on
the specified network.
returned: success
type: list
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
allowed=dict(type='list', elements='dict', options=dict(ip_protocol=dict(required=True, type='str'), ports=dict(type='list', elements='str'))),
denied=dict(type='list', elements='dict', options=dict(ip_protocol=dict(required=True, type='str'), ports=dict(type='list', elements='str'))),
description=dict(type='str'),
destination_ranges=dict(type='list', elements='str'),
direction=dict(type='str', choices=['INGRESS', 'EGRESS']),
disabled=dict(type='bool'),
name=dict(required=True, type='str'),
network=dict(default=dict(selfLink='global/networks/default'), type='dict'),
priority=dict(default=1000, type='int'),
source_ranges=dict(type='list', elements='str'),
source_service_accounts=dict(type='list', elements='str'),
source_tags=dict(type='list', elements='str'),
target_service_accounts=dict(type='list', elements='str'),
target_tags=dict(type='list', elements='str'),
),
mutually_exclusive=[
['allowed', 'denied'],
['destination_ranges', 'source_ranges', 'source_tags'],
['destination_ranges', 'source_ranges'],
['source_service_accounts', 'source_tags', 'target_tags'],
['destination_ranges', 'source_service_accounts', 'source_tags', 'target_service_accounts'],
['source_tags', 'target_service_accounts', 'target_tags'],
['source_service_accounts', 'target_service_accounts', 'target_tags'],
],
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#firewall'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.patch(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#firewall',
u'allowed': FirewallAllowedArray(module.params.get('allowed', []), module).to_request(),
u'denied': FirewallDeniedArray(module.params.get('denied', []), module).to_request(),
u'description': module.params.get('description'),
u'destinationRanges': module.params.get('destination_ranges'),
u'direction': module.params.get('direction'),
u'disabled': module.params.get('disabled'),
u'name': module.params.get('name'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'priority': module.params.get('priority'),
u'sourceRanges': module.params.get('source_ranges'),
u'sourceServiceAccounts': module.params.get('source_service_accounts'),
u'sourceTags': module.params.get('source_tags'),
u'targetServiceAccounts': module.params.get('target_service_accounts'),
u'targetTags': module.params.get('target_tags'),
}
request = encode_request(request, module)
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/firewalls/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/firewalls".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'allowed': FirewallAllowedArray(response.get(u'allowed', []), module).from_response(),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'denied': FirewallDeniedArray(response.get(u'denied', []), module).from_response(),
u'description': response.get(u'description'),
u'destinationRanges': response.get(u'destinationRanges'),
u'direction': response.get(u'direction'),
u'disabled': response.get(u'disabled'),
u'id': response.get(u'id'),
u'name': module.params.get('name'),
u'network': response.get(u'network'),
u'priority': response.get(u'priority'),
u'sourceRanges': response.get(u'sourceRanges'),
u'sourceServiceAccounts': response.get(u'sourceServiceAccounts'),
u'sourceTags': response.get(u'sourceTags'),
u'targetServiceAccounts': response.get(u'targetServiceAccounts'),
u'targetTags': response.get(u'targetTags'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#firewall')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
def encode_request(request, module):
if 'network' in request and request['network'] is not None:
if not re.match(r'https://www.googleapis.com/compute/v1/projects/.*', request['network']):
request['network'] = 'https://www.googleapis.com/compute/v1/projects/{project}/{network}'.format(
project=module.params['project'], network=request['network']
)
return request
class FirewallAllowedArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'IPProtocol': item.get('ip_protocol'), u'ports': item.get('ports')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'IPProtocol': item.get(u'IPProtocol'), u'ports': item.get(u'ports')})
class FirewallDeniedArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'IPProtocol': item.get('ip_protocol'), u'ports': item.get('ports')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'IPProtocol': item.get(u'IPProtocol'), u'ports': item.get(u'ports')})
if __name__ == '__main__':
main()
|
{
"content_hash": "90114d324feba7e43df006d5fec01f4f",
"timestamp": "",
"source": "github",
"line_count": 685,
"max_line_length": 155,
"avg_line_length": 41.68759124087591,
"alnum_prop": 0.6766704020170893,
"repo_name": "SergeyCherepanov/ansible",
"id": "082d6972fde887f6e8ef9d79111c8688c33b50ce",
"size": "29293",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/google/gcp_compute_firewall.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
from .frame import *
from .protocol import *
|
{
"content_hash": "936f49f3c79f5635860ce674d09edbf8",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 22.5,
"alnum_prop": 0.7333333333333333,
"repo_name": "fimad/mitmproxy",
"id": "1c143919211a70cd6145a48e2d96d1c99a26912e",
"size": "45",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "netlib/websockets/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "211484"
},
{
"name": "HTML",
"bytes": "60717"
},
{
"name": "JavaScript",
"bytes": "1755960"
},
{
"name": "Makefile",
"bytes": "7560"
},
{
"name": "Python",
"bytes": "1242699"
},
{
"name": "Shell",
"bytes": "3936"
}
],
"symlink_target": ""
}
|
"""TODO(tsitsulin): add headers, tests, and improve style."""
from typing import List
import numpy as np
import scipy.sparse
def subgraph(graph, seed, n_neighbors): # pylint: disable=missing-function-docstring
total_matrix_size = 1 + np.cumprod(n_neighbors).sum()
picked_nodes = {seed}
last_layer_nodes = {seed}
# Number of nodes to pick at each layer. Initially, only the seed is picked.
to_pick = 1
for n_neighbors_current in n_neighbors:
to_pick = to_pick * n_neighbors_current
neighbors = graph[list(last_layer_nodes), :].nonzero()[1]
neighbors = list(set(
neighbors)) # Make a set neighbors of all nodes from the last layer.
    n_neighbors_real = min(
        to_pick,
        len(neighbors))  # Handle the case where there are fewer neighbors than desired.
    last_layer_nodes = set(
        np.random.choice(neighbors, n_neighbors_real, replace=False))
picked_nodes |= last_layer_nodes
indices = [seed] + list(sorted(picked_nodes - {seed}))
matrix = graph[indices, :][:, indices]
matrix.resize((total_matrix_size, total_matrix_size))
return matrix.todense().A1.reshape(total_matrix_size,
total_matrix_size), indices
def make_batch(graph, features, # pylint: disable=missing-function-docstring
batch_nodes, n_neighbors):
total_matrix_size = 1 + np.cumprod(n_neighbors).sum()
batch_size = len(batch_nodes)
graph_ss = np.zeros((batch_size, total_matrix_size,
total_matrix_size)) # Subsampled graph matrix.
features_ss = np.zeros((batch_size, total_matrix_size,
features.shape[1])) # Subsampled feature matrix.
  subgraph_sizes = np.zeros(batch_size, dtype=int)  # np.int is removed in modern NumPy.
for index, node in enumerate(batch_nodes):
graph_ss[index, :, :], indices = subgraph(graph, node, n_neighbors)
subgraph_sizes[index] = len(indices)
features_ss[index, :subgraph_sizes[index], :] = features[indices, :]
return graph_ss, features_ss, subgraph_sizes
def full_graph_batch(graph, features,
n_neighbors):
node_ids = np.arange(graph.shape[0])
graph_ss, features_ss, subgraph_sizes = make_batch(graph, features, node_ids,
n_neighbors)
return graph_ss, features_ss, node_ids, subgraph_sizes
def random_batch(graph, features,
batch_size, n_neighbors):
node_ids = np.random.randint(graph.shape[0], size=batch_size)
graph_ss, features_ss, subgraph_sizes = make_batch(graph, features, node_ids,
n_neighbors)
return graph_ss, features_ss, node_ids, subgraph_sizes
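# --- Illustrative usage sketch (toy graph; not part of the original file) ---
# Builds a 6-node undirected ring as a CSR matrix and samples two 2-hop
# neighborhoods with fan-outs [2, 2]; all names below are local to this sketch.
if __name__ == '__main__':
  edges = [(i, (i + 1) % 6) for i in range(6)]
  rows = [a for a, b in edges] + [b for a, b in edges]
  cols = [b for a, b in edges] + [a for a, b in edges]
  toy_graph = scipy.sparse.csr_matrix(
      (np.ones(len(rows)), (rows, cols)), shape=(6, 6))
  toy_features = np.eye(6)
  graph_ss, features_ss, node_ids, sizes = random_batch(
      toy_graph, toy_features, batch_size=2, n_neighbors=[2, 2])
  print(graph_ss.shape, features_ss.shape, node_ids, sizes)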
|
{
"content_hash": "81b2037279270e38d1e2b00b69cecb48",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 85,
"avg_line_length": 42.95161290322581,
"alnum_prop": 0.641381900112655,
"repo_name": "google-research/google-research",
"id": "329d83d7ba2854fb2ecf4b8ae87bf111f584e4cc",
"size": "3271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graph_embedding/dmon/utilities/batching.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"formly"
# "formly.tests",
],
MIDDLEWARE_CLASSES=[],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="formly.tests.urls",
SECRET_KEY="notasecret",
)
def run(*args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
    from django.core.management import call_command
    call_command(
        "makemigrations",
        "formly",
        *args
    )
if __name__ == "__main__":
run(*sys.argv[1:])
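# Usage (from the repository root): `python makemigrations.py`.
# Extra arguments are forwarded to the makemigrations command,
# e.g. `python makemigrations.py --dry-run`.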
|
{
"content_hash": "df0c2ece9a5214366e2f5e292c53a14f",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 55,
"avg_line_length": 18.76595744680851,
"alnum_prop": 0.564625850340136,
"repo_name": "eldarion/formly",
"id": "dfaab825893ff9395286c4719b0070e4f774b220",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makemigrations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "27863"
},
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "118321"
}
],
"symlink_target": ""
}
|
from pipes.pipe import Pipe
import numpy as np
from math import sin, pi
# Applies a rainbow filter to the image
# By Luca Schimweg (@lucaschimweg)
class RainbowPipe(Pipe):
def __init__(self):
self.initialized = False
def getSinVal(self, x, offset):
return (sin((((x - offset) * pi)
/ (self.width / 2))) + 1) / 2
def init_pipe(self, data):
self.initialized = True
self.resolution = data.shape
self.width = data.shape[1]
self.vals_0 = np.array([[self.getSinVal(x, 0)
for x in range(self.width)]])
self.vals_1 = np.array([[self.getSinVal(x, self.width / 3)
for x in range(self.width)]])
self.vals_2 = np.array([[self.getSinVal(x, (self.width / 3) * 2)
for x in range(self.width)]])
def pipe(self, data):
if not self.initialized:
self.init_pipe(data)
data[:, :, 0] = data[:, :, 0] * self.vals_0
data[:, :, 1] = data[:, :, 1] * self.vals_1
data[:, :, 2] = data[:, :, 2] * self.vals_2
return data
def __enter__(self):
return self
def __exit__(self, exit_type, value, traceback):
pass
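# --- Illustrative usage sketch (toy frame; not part of the original file) ---
# Runs a random 120x160 RGB float frame through the pipe once; the filter is
# applied in place and the tinted frame keeps the same (height, width, 3) shape.
if __name__ == "__main__":
    frame = np.random.rand(120, 160, 3)
    with RainbowPipe() as rainbow:
        tinted = rainbow.pipe(frame)
    print(tinted.shape)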
|
{
"content_hash": "b017103215894618027b682cb7b6e7e2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 32.35897435897436,
"alnum_prop": 0.5174326465927099,
"repo_name": "jstriebel/webcam-effects",
"id": "1ac5b2723d6862e8ff893eee1932db0fb81961ca",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipes/rainbow_pipe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1187"
},
{
"name": "Python",
"bytes": "12888"
}
],
"symlink_target": ""
}
|
from helpers import unittest
from luigi.mock import MockTarget, MockFileSystem
from luigi.format import Nop
class MockFileTest(unittest.TestCase):
def test_1(self):
t = MockTarget('test')
p = t.open('w')
print('test', file=p)
p.close()
q = t.open('r')
self.assertEqual(list(q), ['test\n'])
q.close()
def test_with(self):
t = MockTarget("foo")
with t.open('w') as b:
b.write("bar")
with t.open('r') as b:
self.assertEqual(list(b), ['bar'])
def test_bytes(self):
t = MockTarget("foo", format=Nop)
with t.open('wb') as b:
b.write(b"bar")
with t.open('rb') as b:
self.assertEqual(list(b), [b'bar'])
def test_default_mode_value(self):
t = MockTarget("foo")
with t.open('w') as b:
b.write("bar")
with t.open() as b:
self.assertEqual(list(b), ['bar'])
def test_mode_none_error(self):
t = MockTarget("foo")
with self.assertRaises(TypeError):
with t.open(None) as b:
b.write("bar")
# That should work in python2 because of the autocast
# That should work in python3 because the default format is Text
def test_unicode(self):
t = MockTarget("foo")
with t.open('w') as b:
b.write(u"bar")
with t.open('r') as b:
self.assertEqual(b.read(), u'bar')
class MockFileSystemTest(unittest.TestCase):
fs = MockFileSystem()
def _touch(self, path):
t = MockTarget(path)
with t.open('w'):
pass
def setUp(self):
self.fs.clear()
self.path = "/tmp/foo"
self.path2 = "/tmp/bar"
self.path3 = "/tmp/foobar"
self._touch(self.path)
self._touch(self.path2)
def test_copy(self):
self.fs.copy(self.path, self.path3)
self.assertTrue(self.fs.exists(self.path))
self.assertTrue(self.fs.exists(self.path3))
def test_exists(self):
self.assertTrue(self.fs.exists(self.path))
def test_remove(self):
self.fs.remove(self.path)
self.assertFalse(self.fs.exists(self.path))
def test_remove_recursive(self):
self.fs.remove("/tmp", recursive=True)
self.assertFalse(self.fs.exists(self.path))
self.assertFalse(self.fs.exists(self.path2))
def test_rename(self):
self.fs.rename(self.path, self.path3)
self.assertFalse(self.fs.exists(self.path))
self.assertTrue(self.fs.exists(self.path3))
def test_listdir(self):
self.assertEqual(sorted([self.path, self.path2]), sorted(self.fs.listdir("/tmp")))
|
{
"content_hash": "17ab0cee98508c926e10d08d220582c2",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 90,
"avg_line_length": 27.353535353535353,
"alnum_prop": 0.568685376661743,
"repo_name": "riga/luigi",
"id": "7f51cd2378ec6f6f7e04b4aedcd9903b1f377953",
"size": "3312",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/mock_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5575"
},
{
"name": "HTML",
"bytes": "43591"
},
{
"name": "JavaScript",
"bytes": "178078"
},
{
"name": "Python",
"bytes": "2145021"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
}
|
import os.path
import subprocess
import tempfile
import rasterio
with rasterio.drivers():
with rasterio.open('tests/data/RGB.byte.tif') as src:
b, g, r = (src.read_band(k) for k in (1, 2, 3))
meta = src.meta
tmpfilename = os.path.join(tempfile.mkdtemp(), 'decimate.tif')
meta.update(
        width=src.width // 2,  # integer division keeps the metadata valid
        height=src.height // 2)
with rasterio.open(
tmpfilename, 'w',
**meta
) as dst:
for k, a in [(1, b), (2, g), (3, r)]:
dst.write_band(k, a)
outfilename = os.path.join(tempfile.mkdtemp(), 'decimate.jpg')
rasterio.copy(tmpfilename, outfilename, driver='JPEG', quality='30')
info = subprocess.call(['open', outfilename])
|
{
"content_hash": "3c06352ea58fc13b4f9d7fc51b6fb743",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 23.806451612903224,
"alnum_prop": 0.5880758807588076,
"repo_name": "clembou/rasterio",
"id": "12b2d1a2cdcfb9e0a102821fa27ca22fcde36377",
"size": "738",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/decimate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "34752"
},
{
"name": "Python",
"bytes": "508008"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
}
|
import os
import logging
import dill
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.contrib.learn as learn
from scipy.stats import entropy
from deeptext.models.base import Base
from deeptext.utils.graph import freeze_graph, load_graph
from deeptext.utils.csv import read_csv
from deeptext.utils.serialization import save, restore
import deeptext.models.constants as constants
from utils import read_data
class SequenceLabeling(Base):
def __init__(self, params):
super(SequenceLabeling, self).__init__(params)
model_dir = self.params[constants.PARAM_KEY_MODEL_DIR]
token_vocab_path = os.path.join(model_dir, constants.FILENAME_TOKEN_VOCAB)
if os.path.isfile(token_vocab_path):
logging.info("loading token vocabulary ...")
self.token_vocab = restore(token_vocab_path)
logging.info("token vocabulary size = %d", len(self.token_vocab.vocabulary_))
else:
self.token_vocab = None
label_vocab_path = os.path.join(model_dir, constants.FILENAME_LABEL_VOCAB)
if os.path.isfile(label_vocab_path):
logging.info("loading label vocabulary ...")
self.label_vocab = restore(label_vocab_path)
logging.info("label vocabulary size = %d", len(self.label_vocab.vocabulary_))
else:
self.label_vocab = None
def preprocess(self, training_data_path):
def tokenizer(iterator):
for value in iterator:
yield value
tokens, labels = read_data(training_data_path)
model_dir = self.params[constants.PARAM_KEY_MODEL_DIR]
if self.token_vocab is None:
logging.info("generating token vocabulary ...")
self.token_vocab = learn.preprocessing.VocabularyProcessor(
max_document_length=self.params[constants.PARAM_KEY_MAX_DOCUMENT_LEN],
tokenizer_fn=tokenizer)
self.token_vocab.fit(tokens)
logging.info("token vocabulary size = %d", len(self.token_vocab.vocabulary_))
token_vocab_path = os.path.join(model_dir, constants.FILENAME_TOKEN_VOCAB)
save(self.token_vocab, token_vocab_path)
self.token_ids = self.preprocess_token_transform(tokens)
self.params[constants.PARAM_KEY_TOKEN_VOCAB_SIZE] = len(self.token_vocab.vocabulary_)
if self.label_vocab is None:
logging.info("generating label vocabulary ...")
self.label_vocab = learn.preprocessing.VocabularyProcessor(
max_document_length=self.params[constants.PARAM_KEY_MAX_DOCUMENT_LEN],
tokenizer_fn=tokenizer)
self.label_vocab.fit(labels)
logging.info("label vocabulary size = %d", len(self.label_vocab.vocabulary_))
label_vocab_path = os.path.join(model_dir, constants.FILENAME_LABEL_VOCAB)
save(self.label_vocab, label_vocab_path)
self.label_ids = self.preprocess_label_transform(labels)
self.params[constants.PARAM_KEY_LABEL_VOCAB_SIZE] = len(self.label_vocab.vocabulary_)
self.tensor_tokens = tf.placeholder_with_default(self.token_ids, name=constants.TENSOR_NAME_TOKENS, shape=[None, self.params[constants.PARAM_KEY_MAX_DOCUMENT_LEN]])
self.tensor_labels = tf.placeholder_with_default(self.label_ids, name=constants.TENSOR_NAME_LABELS, shape=[None, self.params[constants.PARAM_KEY_MAX_DOCUMENT_LEN]])
self.build_model(self.tensor_tokens, self.tensor_labels)
def preprocess_token_transform(self, tokens):
token_ids = self.token_vocab.transform(tokens)
return np.array(list(token_ids))
def preprocess_label_transform(self, labels):
label_ids = self.label_vocab.transform(labels)
return np.array(list(label_ids))
def build_model(self, x, y):
TOKEN_VOCAB_SIZE = self.params[constants.PARAM_KEY_TOKEN_VOCAB_SIZE]
LABEL_VOCAB_SIZE = self.params[constants.PARAM_KEY_LABEL_VOCAB_SIZE]
MAX_DOCUMENT_LEN = self.params[constants.PARAM_KEY_MAX_DOCUMENT_LEN]
EMBEDDING_SIZE = self.params[constants.PARAM_KEY_EMBEDDING_SIZE]
DROPOUT_PROB = self.params[constants.PARAM_KEY_DROPOUT_PROB]
word_vectors = tf.contrib.layers.embed_sequence(
x, vocab_size=TOKEN_VOCAB_SIZE, embed_dim=EMBEDDING_SIZE, scope='words')
cell = tf.contrib.rnn.LSTMCell(EMBEDDING_SIZE)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=DROPOUT_PROB)
output, _ = tf.nn.dynamic_rnn(cell, word_vectors, dtype=tf.float32)
output = tf.reshape(output, [-1, EMBEDDING_SIZE])
logits = tf.contrib.layers.fully_connected(output, LABEL_VOCAB_SIZE)
logits = tf.reshape(logits, [-1, MAX_DOCUMENT_LEN, LABEL_VOCAB_SIZE], name=constants.TENSOR_NAME_LOGITS)
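        # Mask out padding: VocabularyProcessor pads sequences with id 0, so
        # positions where the label id is 0 get zero weight in the loss below.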
zeros_with_shape = tf.zeros_like(y, dtype=tf.int64)
weights = tf.to_double(tf.reshape(tf.not_equal(zeros_with_shape, y), [-1]))
target = tf.one_hot(y, LABEL_VOCAB_SIZE, 1, 0)
loss = tf.contrib.losses.softmax_cross_entropy(
tf.reshape(logits, [-1, LABEL_VOCAB_SIZE]),
tf.reshape(target, [-1, LABEL_VOCAB_SIZE]),
weights=weights)
self.tensor_loss = tf.identity(loss, name=constants.TENSOR_NAME_LOSS)
self.summ_training_loss = tf.summary.scalar("training_loss", self.tensor_loss)
self.summ_validation_loss = tf.summary.scalar("validation_loss", self.tensor_loss)
# Create a training op.
self.tensor_optimizer = tf.contrib.layers.optimize_loss(
self.tensor_loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
self.tensor_prediction = tf.argmax(logits, 2, name=constants.TENSOR_NAME_PREDICTION)
self.summ = tf.summary.merge_all()
def fit(self, steps, batch_size, training_data_path, validation_data_path=None):
self.preprocess(training_data_path)
validation_token_ids = None
validation_label_ids = None
if validation_data_path is not None:
tokens, labels = read_data(validation_data_path)
validation_token_ids = self.preprocess_token_transform(tokens)
validation_label_ids = self.preprocess_label_transform(labels)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
self.restore(sess)
writer = tf.summary.FileWriter("/tmp/deeptext/models/sequence_labeling/base_model")
writer.add_graph(sess.graph)
for i in xrange(steps):
curr_row_ids = np.random.choice(self.token_ids.shape[0], batch_size)
curr_token_ids = self.token_ids[curr_row_ids]
curr_label_ids = self.label_ids[curr_row_ids]
sess.run(self.tensor_optimizer, feed_dict={self.tensor_tokens: curr_token_ids, self.tensor_labels: curr_label_ids})
if (i + 1) % 100 == 0:
c, s = sess.run([self.tensor_loss, self.summ_training_loss], feed_dict={self.tensor_tokens: curr_token_ids, self.tensor_labels: curr_label_ids})
writer.add_summary(s, i + 1)
logging.info("step: %d, training loss: %.2f", i + 1, c)
if validation_data_path is not None:
c, s = sess.run([self.tensor_loss, self.summ_validation_loss], feed_dict={self.tensor_tokens: validation_token_ids, self.tensor_labels: validation_label_ids})
writer.add_summary(s, i + 1)
logging.info("step: %d, validation loss: %.2f", i + 1, c)
self.save(sess)
def predict(self, tokens):
tokens_transform = self.preprocess_token_transform(tokens)
labels_transform = self.sess.run(self.prediction_tensor, feed_dict={
self.tokens_tensor: tokens_transform
})
labels = []
for item in self.label_vocab.reverse(labels_transform):
labels.append(item.split(' '))
return labels
def logits(self, tokens):
tokens_transform = self.preprocess_token_transform(tokens)
logits = self.sess.run(self.logits_tensor, feed_dict={
self.tokens_tensor: tokens_transform
})
entropy_list = []
for i in xrange(len(tokens)):
max_entropy = 0
for j in xrange(len(tokens[i])):
cur_entropy = entropy(logits[i][j])
max_entropy = max(cur_entropy, max_entropy)
entropy_list.append(max_entropy)
return entropy_list
def evaluate(self, testing_data_path):
tokens, labels = read_data(testing_data_path)
predicted_labels = self.predict(tokens)
label_corre_cnt = 0
label_total_cnt = 0
sentence_corre_cnt = 0
sentence_total_cnt = len(labels)
for i in xrange(len(labels)):
label_total_cnt += len(labels[i])
            sentence_corre = True
            for j in xrange(min(len(labels[i]), len(predicted_labels[i]))):
                if labels[i][j] == predicted_labels[i][j]:
                    label_corre_cnt += 1
                else:
                    sentence_corre = False
            if sentence_corre:
                sentence_corre_cnt += 1
logging.info("total label count: %d, label accuracy: %.2f", label_total_cnt, 1.0 * label_corre_cnt / label_total_cnt)
logging.info("total sentence count: %d, sentence accuracy: %.2f", sentence_total_cnt, 1.0 * sentence_corre_cnt / sentence_total_cnt)
|
{
"content_hash": "2eaf965189114e4a2146d7a4df8ea9f3",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 182,
"avg_line_length": 44.127272727272725,
"alnum_prop": 0.6259785743716523,
"repo_name": "ybbaigo/deeptext",
"id": "be64d15e81d47786e399a4a5890f36a4317171bb",
"size": "9708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deeptext/models/sequence_labeling/sequence_labeling.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19572"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
}
|
from CGATReport.Tracker import SingleTableTrackerRows
from CGATReport.Tracker import SingleTableTrackerHistogram
from MappingReport import MappingTracker
class MappingSummary(MappingTracker, SingleTableTrackerRows):
table = "view_mapping"
class PairedMappingSummary(MappingTracker, SingleTableTrackerRows):
table = "view_mapping"
# select only tracks with paired reads
where = "pairs_total > 0"
class TophatSummary(MappingTracker, SingleTableTrackerRows):
table = "tophat_stats"
class StarSummary(MappingTracker, SingleTableTrackerRows):
table = "star_stats"
class BamSummary(MappingTracker, SingleTableTrackerRows):
table = "bam_stats"
class PicardSummary(MappingTracker, SingleTableTrackerRows):
table = "picard_stats_alignment_summary_metrics"
class PicardDuplicationSummary(MappingTracker, SingleTableTrackerRows):
table = "picard_duplication_metrics"
class PicardRnaMetrics(MappingTracker, SingleTableTrackerRows):
table = "picard_rna_metrics"
class PicardAlignmentSummaryMetrics(MappingTracker, SingleTableTrackerRows):
table = "picard_stats_alignment_summary_metrics"
class PicardInsertSizeMetrics(MappingTracker, SingleTableTrackerRows):
table = "picard_stats_insert_size_metrics"
class PicardDuplicatesMetrics(MappingTracker, SingleTableTrackerRows):
table = "picard_duplicates_duplicate_metrics"
class PicardInsertSizeHistogram(MappingTracker, SingleTableTrackerHistogram):
table = "picard_stats_insert_size_histogram"
column = "insert_size"
class PicardDuplicatesHistogram(MappingTracker,
SingleTableTrackerHistogram):
table = "picard_duplicates_duplicate_histogram"
column = "duplicates"
class PicardQualityByCycleHistogram(MappingTracker,
SingleTableTrackerHistogram):
table = "picard_stats_quality_by_cycle_histogram"
column = "cycle"
class PicardQualityDistributionHistogram(MappingTracker,
SingleTableTrackerHistogram):
table = "picard_stats_quality_distribution_histogram"
column = "quality"
class DuplicationMetricsTable(MappingTracker, SingleTableTrackerHistogram):
table = "picard_complexity_histogram"
def __call__(self, track=None, slice=None):
cols = self.getColumns(self.table)
if len(cols) == 0:
return None
fields = ", ".join(cols)
data = self.getAll(
"SELECT %(fields)s FROM %(table)s ORDER BY coverage_multiple")
return data
class RnaBiasTable(MappingTracker, SingleTableTrackerHistogram):
table = "picard_rna_histogram"
column = "coverage_multiple"
class MappingFlagsMismatches(MappingTracker, SingleTableTrackerHistogram):
table = "bam_stats_nm"
column = "nm"
class MappingFlagsHits(MappingTracker, SingleTableTrackerHistogram):
table = "bam_stats_nh"
column = "nh"
class MappingQuality(MappingTracker, SingleTableTrackerHistogram):
table = "bam_stats_mapq"
column = "mapq"
class AlignmentQualityByCycle(MappingTracker, SingleTableTrackerHistogram):
table = "picard_stats_quality_by_cycle_histogram"
column = "cycle"
class DuplicationMetrics(MappingTracker, SingleTableTrackerHistogram):
table = "picard_complexity_histogram"
column = "coverage_multiple"
class AlignmentQualityDistribution(MappingTracker,
SingleTableTrackerHistogram):
table = "picard_stats_quality_distribution_histogram"
column = "quality"
class MappingContext(MappingTracker, SingleTableTrackerRows):
table = "context_stats"
class FilteringSummary(MappingTracker, SingleTableTrackerRows):
table = "mapping_stats"
class BigwigSummary(MappingTracker, SingleTableTrackerRows):
table = "bigwig_stats"
|
{
"content_hash": "3c55c8fff8d91a72fd0b2e63de1fb5fd",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 77,
"avg_line_length": 28.095588235294116,
"alnum_prop": 0.7390735409578645,
"repo_name": "CGATOxford/CGATPipelines",
"id": "7db383475c0c320b1f6d45c6d3df78378427a818",
"size": "3821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CGATPipelines/pipeline_docs/pipeline_mapping/trackers/Mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
}
|
import os
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('datasets', parent_package, top_path)
config.add_data_dir('data')
config.add_data_dir('descr')
config.add_data_dir('images')
config.add_data_dir(os.path.join('tests', 'data'))
config.add_extension('_svmlight_format',
sources=['_svmlight_format.c'],
include_dirs=[numpy.get_include()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
{
"content_hash": "cedc6bb8bd6c479ce4d035abb82e1172",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 28.608695652173914,
"alnum_prop": 0.6306990881458967,
"repo_name": "DailyActie/Surrogate-Model",
"id": "793cd2972e828445bd3837a691a1faef448d36e2",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scikit-learn-master/sklearn/datasets/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
}
|
import json
import os
from argparse import ArgumentParser
from collections import Counter
from time import sleep, time
from uuid import uuid4
import requests
from capella.dedicated.CapellaAPI import CapellaAPI as CapellaAPIDedicated
from capella.serverless.CapellaAPI import CapellaAPI as CapellaAPIServerless
from fabric.api import local
from logger import logger
from perfrunner.helpers.misc import maybe_atoi, pretty_dict, remove_nulls
from perfrunner.settings import ClusterSpec, TestConfig
def raise_for_status(resp: requests.Response):
try:
resp.raise_for_status()
except Exception as e:
logger.error('HTTP Error {}: response content: {}'.format(resp.status_code, resp.content))
        raise e
class Terraform:
# TODO: AWS capacity retry,
# Reset TFs function,
# Swap find and replace for pipe,
# Backup spec update,
# Add support for multiple clusters.
AZURE_IMAGE_URL_PREFIX = '/subscriptions/a5c0936c-5cec-4c8c-85e1-97f5cab644d9/resourceGroups/' \
'perf-resources-eastus/providers/Microsoft.Compute/galleries' \
'/perf_vm_images/images'
IMAGE_MAP = {
'aws': {
'clusters': {
'x86_64': 'perf-server-2022-03-us-east', # ami-005bce54f0c4e2248
'arm': 'perf-server-arm-us-east', # ami-0f249abfe3dd01b30
'al2': 'perf-server-al_x86-2022-03-us-east', # ami-060e286353d227c32
},
'clients': 'perf-client-sgw-cblite', # ami-01b36cb3330d38ac5
'utilities': 'perf-broker-us-east', # ami-0d9e5ee360aa02d94
'sync_gateways': 'perf-server-2022-03-us-east', # ami-005bce54f0c4e2248
},
'gcp': {
'clusters': 'perftest-server-disk-image-1',
'clients': 'perf-client-cblite-disk-image-3',
'utilities': 'perftest-broker-disk-image',
'sync_gateways': 'perftest-server-disk-image-1'
},
'azure': {
'clusters': '{}/perf-server-image-def'.format(AZURE_IMAGE_URL_PREFIX),
'clients': '{}/perf-client-image-def'.format(AZURE_IMAGE_URL_PREFIX),
'utilities': '{}/perf-broker-image-def'.format(AZURE_IMAGE_URL_PREFIX),
'sync_gateways': '{}/perf-server-image-def'.format(AZURE_IMAGE_URL_PREFIX)
}
}
def __init__(self, options):
self.options = options
self.infra_spec = ClusterSpec()
self.infra_spec.parse(self.options.cluster, override=options.override)
self.provider = self.infra_spec.cloud_provider
self.backend = None
self.uuid = uuid4().hex[0:6] if self.provider != 'aws' else None
self.os_arch = self.infra_spec.infrastructure_settings.get('os_arch', 'x86_64')
self.node_list = {
'clusters': [
n for nodes in self.infra_spec.infrastructure_clusters.values()
for n in nodes.strip().split()
],
'clients': self.infra_spec.clients,
'utilities': self.infra_spec.utilities,
'sync_gateways': self.infra_spec.sgw_servers
}
self.cloud_storage = bool(
int(self.infra_spec.infrastructure_settings.get('cloud_storage', 0))
)
if self.provider == 'gcp' or \
(self.provider == 'capella' and self.infra_spec.capella_backend == 'gcp'):
self.zone = self.options.zone
self.region = self.zone.rsplit('-', 1)[0]
else:
self.zone = None
self.region = self.options.region
def deploy(self):
# Configure terraform
self.populate_tfvars()
self.terraform_init(self.provider)
# Deploy resources
self.terraform_apply(self.provider)
# Get info about deployed resources and update cluster spec file
output = self.terraform_output(self.provider)
self.update_spec(output)
def destroy(self):
self.terraform_destroy(self.provider)
def create_tfvar_nodes(self):
tfvar_nodes = {
'clusters': {},
'clients': {},
'utilities': {},
'sync_gateways': {}
}
cloud_provider = self.backend if self.provider == 'capella' else self.provider
for role, nodes in self.node_list.items():
# If this is a capella test, skip cluster nodes
if self.provider == 'capella' and role == 'clusters':
continue
i = 0
for node in nodes:
node_cluster, node_group = node.split(':')[0].split('.', 2)[1:]
parameters = self.infra_spec.infrastructure_config()[node_group.split('.')[0]]
parameters['node_group'] = node_group
# Try getting image name from cli option
image = getattr(self.options, '{}_image'.format({
'clusters': 'cluster',
'clients': 'client',
'utilities': 'utility',
'sync_gateways': 'sgw'
}[role]))
# If image name isn't provided as cli param, use hardcoded defaults
if image is None:
image = self.IMAGE_MAP[cloud_provider][role]
if cloud_provider == 'aws' and role == 'clusters':
image = image.get(self.os_arch, image['x86_64'])
parameters['image'] = image
parameters['volume_size'] = int(parameters.get('volume_size', 0))
storage_class = parameters.get('storage_class', parameters.get('volume_type', None))
if not storage_class:
node_cluster_config = self.infra_spec.infrastructure_section(node_cluster)
storage_class = node_cluster_config.get('storage_class')
parameters['storage_class'] = storage_class
if 'disk_tier' not in parameters and cloud_provider == 'azure':
parameters['disk_tier'] = ""
if cloud_provider in ('aws', 'gcp'):
parameters['iops'] = int(parameters.get('iops', 0))
if cloud_provider == 'aws':
parameters['volume_throughput'] = int(parameters.get('volume_throughput',
0))
del parameters['instance_capacity']
tfvar_nodes[role][str(i := i+1)] = parameters
return tfvar_nodes
def populate_tfvars(self):
cloud_provider = self.backend if self.provider == 'capella' else self.provider
tfvar_nodes = self.create_tfvar_nodes()
replacements = {
'<CLOUD_REGION>': self.region,
'<CLUSTER_NODES>': tfvar_nodes['clusters'],
'<CLIENT_NODES>': tfvar_nodes['clients'],
'<UTILITY_NODES>': tfvar_nodes['utilities'],
'<SYNC_GATEWAY_NODES>': tfvar_nodes['sync_gateways'],
'<CLOUD_STORAGE>': self.cloud_storage,
'<GLOBAL_TAG>': self.options.tag if self.options.tag else ""
}
if self.uuid:
replacements['<UUID>'] = self.uuid
if self.zone:
replacements['<CLOUD_ZONE>'] = self.zone
with open('terraform/{}/terraform.tfvars'.format(cloud_provider), 'r+') as tfvars:
file_string = tfvars.read()
for k, v in replacements.items():
file_string = file_string.replace(k, json.dumps(v, indent=4))
tfvars.seek(0)
tfvars.write(file_string)
# Initializes terraform environment.
def terraform_init(self, provider):
local('cd terraform/{} && terraform init >> terraform.log'.format(provider))
# Apply and output terraform deployment.
def terraform_apply(self, provider):
local('cd terraform/{} && '
'terraform plan -out tfplan.out >> terraform.log && '
'terraform apply -auto-approve tfplan.out'
.format(provider))
def terraform_output(self, provider):
output = json.loads(
local('cd terraform/{} && terraform output -json'.format(provider), capture=True)
)
return output
def terraform_destroy(self, provider):
local('cd terraform/{} && '
'terraform plan -destroy -out tfplan_destroy.out >> terraform.log && '
'terraform apply -auto-approve tfplan_destroy.out'
.format(provider))
# Update spec file with deployed infrastructure.
def update_spec(self, output):
sections = ['clusters', 'clients', 'utilities', 'sync_gateways']
cluster_dicts = [
self.infra_spec.infrastructure_clusters,
self.infra_spec.infrastructure_clients,
self.infra_spec.infrastructure_utilities,
self.infra_spec.infrastructure_sync_gateways
]
output_keys = [
'cluster_instance_ips',
'client_instance_ips',
'utility_instance_ips',
'sync_gateway_instance_ips'
]
private_sections = [
'cluster_private_ips',
'client_private_ips',
'utility_private_ips',
'sync_gateway_private_ips'
]
for section, cluster_dict, output_key, private_section in zip(sections,
cluster_dicts,
output_keys,
private_sections):
if (section not in self.infra_spec.config.sections()) or \
(self.provider == 'capella' and section == 'clusters'):
continue
for cluster, nodes in cluster_dict.items():
node_list = nodes.strip().split()
public_ips = [None for _ in node_list]
private_ips = [None for _ in node_list]
for _, info in output[output_key]['value'].items():
node_group = info['node_group']
for i, node in enumerate(node_list):
hostname, *extras = node.split(':', maxsplit=1)
if hostname.split('.', 2)[-1] == node_group:
public_ip = info['public_ip']
if extras:
public_ip += ':{}'.format(*extras)
public_ips[i] = public_ip
if 'private_ip' in info:
private_ips[i] = info['private_ip']
break
self.infra_spec.config.set(section, cluster, '\n' + '\n'.join(public_ips))
if any(private_ips):
if private_section not in self.infra_spec.config.sections():
self.infra_spec.config.add_section(private_section)
self.infra_spec.config.set(private_section, cluster,
'\n' + '\n'.join(private_ips))
if self.cloud_storage:
bucket_url = output['cloud_storage']['value']['storage_bucket']
self.infra_spec.config.set('storage', 'backup', bucket_url)
if self.provider == 'azure':
storage_acc = output['cloud_storage']['value']['storage_account']
self.infra_spec.config.set('storage', 'storage_acc', storage_acc)
self.infra_spec.update_spec_file()
with open('cloud/infrastructure/cloud.ini', 'r+') as f:
s = f.read()
if self.provider != 'capella':
s = s.replace("server_list", "\n".join(self.infra_spec.servers))
s = s.replace("worker_list", "\n".join(self.infra_spec.clients))
if self.infra_spec.sgw_servers:
s = s.replace("sgw_list", "\n".join(self.infra_spec.sgw_servers))
f.seek(0)
f.write(s)
class CapellaTerraform(Terraform):
SERVICES_CAPELLA_TO_PERFRUNNER = {
'Data': 'kv',
'Analytics': 'cbas',
'Query': 'n1ql',
'Index': 'index',
'Search': 'fts',
'Eventing': 'eventing'
}
SERVICES_PERFRUNNER_TO_CAPELLA = {
'kv': 'data',
'cbas': 'analytics',
'n1ql': 'query',
'index': 'index',
'fts': 'search',
'eventing': 'eventing'
}
def __init__(self, options):
super().__init__(options)
self.test_config = None
if options.test_config:
test_config = TestConfig()
test_config.parse(options.test_config, override=options.override)
self.test_config = test_config
if self.test_config and self.test_config.rebalance_settings.nodes_after:
self.node_list['clusters'] = [
node
for nodes, initial_num in zip(self.infra_spec.infrastructure_clusters.values(),
self.test_config.cluster.initial_nodes)
for node in nodes.strip().split()[:initial_num]
]
self.backend = self.infra_spec.infrastructure_settings['backend']
if public_api_url := self.options.capella_public_api_url:
env = public_api_url.removeprefix('https://')\
.removesuffix('.nonprod-project-avengers.com')\
.split('.', 1)[1]
self.infra_spec.config.set('infrastructure', 'cbc_env', env)
if tenant := self.options.capella_tenant:
self.infra_spec.config.set('infrastructure', 'cbc_tenant', tenant)
if project := self.options.capella_project:
self.infra_spec.config.set('infrastructure', 'cbc_project', project)
self.infra_spec.update_spec_file()
self.tenant_id = self.infra_spec.infrastructure_settings['cbc_tenant']
self.project_id = self.infra_spec.infrastructure_settings['cbc_project']
self.api_client = CapellaAPIDedicated(
'https://cloudapi.{}.nonprod-project-avengers.com'.format(
self.infra_spec.infrastructure_settings['cbc_env']
),
os.getenv('CBC_SECRET_KEY'),
os.getenv('CBC_ACCESS_KEY'),
os.getenv('CBC_USER'),
os.getenv('CBC_PWD')
)
self.use_internal_api = (
(self.options.capella_cb_version and self.options.capella_ami) or self.backend == 'gcp'
)
self.capella_timeout = max(0, self.options.capella_timeout)
def deploy(self):
# Configure terraform
self.populate_tfvars()
self.terraform_init(self.backend)
self.terraform_init('capella')
# Deploy non-capella resources
self.terraform_apply(self.backend)
non_capella_output = self.terraform_output(self.backend)
# Deploy capella cluster
if self.use_internal_api:
cluster_id = self.deploy_cluster_internal_api()
else:
self.terraform_apply('capella')
capella_output = self.terraform_output('capella')
cluster_id = capella_output['cluster_id']['value']
# Update cluster spec file
self.update_spec(non_capella_output, cluster_id)
# Do VPC peering
if self.options.vpc_peering:
network_info = non_capella_output['network']['value']
self.peer_vpc(network_info, cluster_id)
def destroy(self):
# Tear down VPC peering connection
self.destroy_peering_connection()
# Destroy non-capella resources
self.terraform_destroy(self.backend)
# Destroy capella cluster
use_internal_api = self.infra_spec.infrastructure_settings.get('cbc_use_internal_api', 0)
if int(use_internal_api):
cluster_id = self.infra_spec.infrastructure_settings['cbc_cluster']
self.destroy_cluster_internal_api(cluster_id)
else:
self.terraform_destroy('capella')
def populate_tfvars(self):
super().populate_tfvars()
replacements = {
'<UUID>': self.uuid,
'<CLUSTER_SETTINGS>': {
'project_id': self.project_id,
'provider': self.backend,
'region': self.region,
'cidr': self.get_available_cidr()
},
'<SERVER_GROUPS>': [
group for groups in self.create_tfvar_server_groups().values()
for group in groups
]
}
with open('terraform/capella/terraform.tfvars', 'r+') as tfvars:
file_string = tfvars.read()
for k, v in replacements.items():
file_string = file_string.replace(k, json.dumps(v, indent=4))
tfvars.seek(0)
tfvars.write(file_string)
@staticmethod
def capella_server_group_sizes(node_schemas):
server_groups = {}
for node in node_schemas:
name, services = node.split(':')[:2]
_, cluster, node_group, _ = name.split('.')
services_set = tuple(set(services.split(',')))
node_tuple = (node_group, services_set)
if cluster not in server_groups:
server_groups[cluster] = [node_tuple]
else:
server_groups[cluster].append(node_tuple)
server_group_sizes = {
cluster: Counter(node_tuples) for cluster, node_tuples in server_groups.items()
}
return server_group_sizes
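    # Example (hypothetical schema strings; the service order inside each tuple
    # is not guaranteed because the services round-trip through a set):
    #   capella_server_group_sizes([
    #       'ec2.cluster1.group1.1:kv,index',
    #       'ec2.cluster1.group1.2:kv,index',
    #       'ec2.cluster1.group2.1:n1ql',
    #   ])
    #   -> {'cluster1': Counter({('group1', ('kv', 'index')): 2,
    #                            ('group2', ('n1ql',)): 1})}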
@staticmethod
def construct_capella_server_groups(infra_spec, node_schemas):
"""Create correct server group objects for deploying clusters using internal API.
Sample server group template:
```
{
"count": 3,
"services": [
{"type": "kv"},
{"type": "index"}
],
"compute": {
"type": "r5.2xlarge",
"cpu": 0,
"memoryInGb": 0
},
"disk": {
"type": "io2",
"sizeInGb": 50,
"iops": 3000
}
}
```
"""
server_groups = CapellaTerraform.capella_server_group_sizes(node_schemas)
cluster_list = []
        for cluster, cluster_groups in server_groups.items():
            server_list = []
            cluster_params = infra_spec.infrastructure_section(cluster)
            for (node_group, services), size in cluster_groups.items():
node_group_config = infra_spec.infrastructure_section(node_group)
storage_class = node_group_config.get(
'volume_type', node_group_config.get(
'storage_class', cluster_params.get('storage_class')
)
).lower()
server_group = {
'count': size,
'services': [{'type': svc} for svc in services],
'compute': {
'type': node_group_config['instance_type'],
'cpu': 0,
'memoryInGb': 0
},
'disk': {
'type': storage_class,
'sizeInGb': int(node_group_config['volume_size']),
}
}
if infra_spec.capella_backend == 'aws':
server_group['disk']['iops'] = int(node_group_config.get('iops', 3000))
server_list.append(server_group)
cluster_list.append(server_list)
return cluster_list
def deploy_cluster_internal_api(self):
config = {
"cidr": self.get_available_cidr(),
"name": "perf-cluster-{}".format(self.uuid),
"description": "",
"projectId": self.project_id,
"provider": 'hosted{}'.format(self.infra_spec.capella_backend.upper()),
"region": self.region,
"singleAZ": True,
"server": None,
"specs": self.construct_capella_server_groups(self.infra_spec,
self.node_list['clusters'])[0],
"package": "enterprise"
}
logger.info(config)
if self.options.capella_cb_version and self.options.capella_ami:
config['overRide'] = {
'token': os.getenv('CBC_OVERRIDE_TOKEN'),
'server': self.options.capella_cb_version,
'image': self.options.capella_ami
}
logger.info('Deploying with custom AMI: {}'.format(self.options.capella_ami))
resp = self.api_client.create_cluster_customAMI(self.tenant_id, config)
raise_for_status(resp)
cluster_id = resp.json().get('id')
logger.info('Initialised cluster deployment for cluster {}'.format(cluster_id))
logger.info('Saving cluster ID to spec file.')
self.infra_spec.config.set('infrastructure', 'cbc_cluster', cluster_id)
self.infra_spec.config.set('infrastructure', 'cbc_use_internal_api', "1")
self.infra_spec.update_spec_file()
timeout_mins = self.capella_timeout
interval_secs = 30
status = None
t0 = time()
while (time() - t0) < timeout_mins * 60:
status = self.api_client.get_cluster_status(cluster_id).json().get('status')
logger.info('Cluster state: {}'.format(status))
if status != 'healthy':
sleep(interval_secs)
else:
break
if status != 'healthy':
logger.error('Deployment timed out after {} mins'.format(timeout_mins))
exit(1)
return cluster_id
def destroy_cluster_internal_api(self, cluster_id):
logger.info('Deleting Capella cluster...')
resp = self.api_client.delete_cluster(cluster_id)
raise_for_status(resp)
logger.info('Capella cluster successfully queued for deletion.')
    def create_tfvar_server_groups(self) -> dict[str, list[dict]]:
server_group_sizes = CapellaTerraform.capella_server_group_sizes(
self.node_list['clusters']
)
tfvar_server_groups = {cluster: [] for cluster in server_group_sizes}
for cluster, server_groups in server_group_sizes.items():
cluster_params = self.infra_spec.infrastructure_section(cluster)
for (node_group, services), size in server_groups.items():
parameters = self.infra_spec.infrastructure_config()[node_group]
parameters['instance_capacity'] = size
parameters['services'] = [
                    self.SERVICES_PERFRUNNER_TO_CAPELLA[svc] for svc in services
]
storage_class = parameters.pop(
'volume_type', parameters.get(
'storage_class', cluster_params.get('storage_class')
)
).upper()
parameters['storage_class'] = storage_class
parameters['volume_size'] = int(parameters['volume_size'])
parameters['iops'] = int(parameters.get('iops', 0))
tfvar_server_groups[cluster].append(parameters)
return tfvar_server_groups
def get_available_cidr(self):
resp = self.api_client.get_deployment_options(self.tenant_id)
return resp.json().get('suggestedCidr')
def get_deployed_cidr(self, cluster_id):
resp = self.api_client.get_cluster_info(cluster_id)
return resp.json().get('place', {}).get('CIDR')
def get_hostnames(self, cluster_id):
resp = self.api_client.get_nodes(tenant_id=self.tenant_id,
project_id=self.project_id,
cluster_id=cluster_id)
nodes = resp.json()['data']
nodes = [node['data'] for node in nodes]
services_per_node = {node['hostname']: node['services'] for node in nodes}
kv_nodes = []
non_kv_nodes = []
for hostname, services in services_per_node.items():
services_string = ','.join(self.SERVICES_CAPELLA_TO_PERFRUNNER[svc] for svc in services)
if 'kv' in services_string:
kv_nodes.append("{}:{}".format(hostname, services_string))
else:
non_kv_nodes.append("{}:{}".format(hostname, services_string))
ret_list = kv_nodes + non_kv_nodes
return ret_list
def update_spec(self, non_capella_output, cluster_id):
super().update_spec(non_capella_output)
self.infra_spec.config.add_section('clusters_schemas')
for option, value in self.infra_spec.infrastructure_clusters.items():
self.infra_spec.config.set('clusters_schemas', option, value)
hostnames = self.get_hostnames(cluster_id)
cluster = self.infra_spec.config.options('clusters')[0]
self.infra_spec.config.set('clusters', cluster, '\n' + '\n'.join(hostnames))
self.infra_spec.config.set('infrastructure', 'cbc_cluster', cluster_id)
if self.use_internal_api:
self.infra_spec.config.set('infrastructure', 'cbc_use_internal_api', "1")
self.infra_spec.update_spec_file()
def peer_vpc(self, network_info, cluster_id):
logger.info('Setting up VPC peering...')
if self.infra_spec.capella_backend == 'aws':
peering_connection = self._peer_vpc_aws(network_info, cluster_id)
elif self.infra_spec.capella_backend == 'gcp':
peering_connection, dns_managed_zone, client_vpc = self._peer_vpc_gcp(network_info,
cluster_id)
self.infra_spec.config.set('infrastructure', 'dns_managed_zone', dns_managed_zone)
self.infra_spec.config.set('infrastructure', 'client_vpc', client_vpc)
if peering_connection:
self.infra_spec.config.set('infrastructure', 'peering_connection', peering_connection)
self.infra_spec.update_spec_file()
else:
exit(1)
def _peer_vpc_aws(self, network_info, cluster_id) -> str:
# Initiate VPC peering
logger.info('Initiating peering')
client_vpc = network_info['vpc_id']
cidr = network_info['subnet_cidr']
route_table = network_info['route_table_id']
cluster_cidr = self.get_deployed_cidr(cluster_id)
logger.info('Adding Capella private network (AWS): VPC ID = {}'.format(client_vpc))
account_id = local('AWS_PROFILE=default env/bin/aws sts get-caller-identity '
'--query Account --output text',
capture=True)
data = {
"name": "perftest-network",
"aws": {
"accountId": account_id,
"vpcId": client_vpc,
"region": self.region,
"cidr": cidr
},
"provider": "aws"
}
peering_connection_id = None
try:
resp = self.api_client.create_private_network(
self.tenant_id, self.project_id, cluster_id, data)
private_network_id = resp.json()['id']
# Get AWS CLI commands that we need to run to complete the peering process
logger.info('Accepting peering request')
resp = self.api_client.get_private_network(
self.tenant_id, self.project_id, cluster_id, private_network_id)
aws_commands = resp.json()['data']['commands']
peering_connection_id = resp.json()['data']['aws']['providerId']
# Finish peering process using AWS CLI
for command in aws_commands:
local("AWS_PROFILE=default env/bin/{}".format(command))
# Finally, set up route table in our client VPC
logger.info('Configuring route table in client VPC')
local(
(
"AWS_PROFILE=default env/bin/aws --region {} ec2 create-route "
"--route-table-id {} "
"--destination-cidr-block {} "
"--vpc-peering-connection-id {}"
).format(self.region, route_table, cluster_cidr, peering_connection_id)
)
except Exception as e:
logger.error('Failed to complete VPC peering: {}'.format(e))
return peering_connection_id
    def _peer_vpc_gcp(self, network_info, cluster_id) -> tuple[str, str, str]:
# Initiate VPC peering
logger.info('Initiating peering')
client_vpc = network_info['vpc_id']
cidr = network_info['subnet_cidr']
service_account = local("gcloud config get account", capture=True)
logger.info('Adding Capella private network (GCP): VPC name = {}'.format(client_vpc))
project_id = local('gcloud config get project', capture=True)
data = {
"name": "perftest-network",
"gcp": {
"projectId": project_id,
"networkName": client_vpc,
"cidr": cidr,
"serviceAccount": service_account
},
"provider": "gcp"
}
peering_connection_name = None
dns_managed_zone_name = None
try:
resp = self.api_client.create_private_network(
self.tenant_id, self.project_id, cluster_id, data)
private_network_id = resp.json()['id']
# Get gcloud commands that we need to run to complete the peering process
logger.info('Accepting peering request')
resp = self.api_client.get_private_network(
self.tenant_id, self.project_id, cluster_id, private_network_id)
gcloud_commands = resp.json()['data']['commands']
# Finish peering process using gcloud
for command in gcloud_commands:
local(command)
peering_connection_name, capella_vpc_uri = local(
(
'gcloud compute networks peerings list '
'--network={} '
'--format="value(peerings[].name,peerings[].network)"'
).format(client_vpc),
capture=True
).split()
dns_managed_zone_name = local(
(
'gcloud dns managed-zones list '
'--filter="(peeringConfig.targetNetwork.networkUrl = {})" '
'--format="value(name)"'
).format(capella_vpc_uri),
capture=True
)
except Exception as e:
logger.error('Failed to complete VPC peering: {}'.format(e))
return peering_connection_name, dns_managed_zone_name, client_vpc
def destroy_peering_connection(self):
logger.info("Destroying peering connection...")
if self.infra_spec.capella_backend == 'aws':
self._destroy_peering_connection_aws()
elif self.infra_spec.capella_backend == 'gcp':
self._destroy_peering_connection_gcp()
def _destroy_peering_connection_aws(self):
peering_connection = self.infra_spec.infrastructure_settings.get('peering_connection', None)
if not peering_connection:
logger.warn('No peering connection ID found in cluster spec; nothing to destroy.')
return
local(
(
"AWS_PROFILE=default env/bin/aws "
"--region {} ec2 delete-vpc-peering-connection "
"--vpc-peering-connection-id {}"
).format(self.region, peering_connection)
)
def _destroy_peering_connection_gcp(self):
peering_connection = self.infra_spec.infrastructure_settings.get('peering_connection', None)
if not peering_connection:
logger.warn('No peering connection ID found in cluster spec; nothing to destroy.')
return
dns_managed_zone = self.infra_spec.infrastructure_settings['dns_managed_zone']
client_vpc = self.infra_spec.infrastructure_settings['client_vpc']
local('gcloud compute networks peerings delete {} --network={}'
.format(peering_connection, client_vpc))
local('gcloud dns managed-zones delete {}'.format(dns_managed_zone))
class ServerlessTerraform(CapellaTerraform):
NEBULA_OVERRIDE_ARGS = ['override_count', 'min_count', 'max_count', 'instance_type']
def __init__(self, options):
Terraform.__init__(self, options)
if not options.test_config:
logger.error('Test config required if deploying serverless infrastructure.')
exit(1)
test_config = TestConfig()
test_config.parse(options.test_config, override=options.override)
self.test_config = test_config
for prefix, section in {'dapi': 'data_api', 'nebula': 'direct_nebula'}.items():
for arg in self.NEBULA_OVERRIDE_ARGS:
if (value := getattr(options, prefix + '_' + arg)) is not None:
self.infra_spec.config.set(section, arg, str(value))
self.infra_spec.update_spec_file()
self.backend = self.infra_spec.infrastructure_settings['backend']
if public_api_url := self.options.capella_public_api_url:
env = public_api_url.removeprefix('https://')\
.removesuffix('.nonprod-project-avengers.com')\
.split('.', 1)[1]
self.infra_spec.config.set('infrastructure', 'cbc_env', env)
self.infra_spec.update_spec_file()
else:
public_api_url = 'https://cloudapi.{}.nonprod-project-avengers.com'.format(
self.infra_spec.infrastructure_settings['cbc_env']
)
self.serverless_client = CapellaAPIServerless(
public_api_url,
os.getenv('CBC_USER'),
os.getenv('CBC_PWD'),
os.getenv('CBC_TOKEN_FOR_INTERNAL_SUPPORT')
)
self.dedicated_client = CapellaAPIDedicated(
public_api_url,
None, # Don't need access key and secret key for creating a project or getting tenant
None, # IDs (which are the only things we need it for)
os.getenv('CBC_USER'),
os.getenv('CBC_PWD')
)
self.tenant_id = self.infra_spec.infrastructure_settings.get('cbc_tenant', None)
self.project_id = self.infra_spec.infrastructure_settings.get('cbc_project', None)
self.dp_id = self.infra_spec.infrastructure_settings.get('cbc_cluster', None)
self.cluster_id = None
if self.tenant_id is None:
self.tenant_id = self.get_tenant_id()
def get_tenant_id(self):
logger.info('Getting tenant ID...')
resp = self.dedicated_client.list_accessible_tenants()
raise_for_status(resp)
tenant_id = resp.json()[0]['id']
self.infra_spec.config.set('infrastructure', 'cbc_tenant', tenant_id)
self.infra_spec.update_spec_file()
logger.info('Found tenant ID: {}'.format(tenant_id))
return tenant_id
def deploy(self):
# Configure terraform
Terraform.populate_tfvars(self)
self.terraform_init(self.backend)
# Deploy non-capella resources
self.terraform_apply(self.backend)
non_capella_output = self.terraform_output(self.backend)
Terraform.update_spec(self, non_capella_output)
# Deploy serverless dataplane + databases
self.deploy_serverless_dataplane()
self.create_project()
self.create_serverless_dbs()
self.update_spec()
def destroy(self):
# Destroy non-capella resources
self.terraform_destroy(self.backend)
# Destroy capella cluster
if self.dp_id:
self.destroy_serverless_databases()
self.destroy_serverless_dataplane()
else:
logger.warn('No serverless dataplane ID found. Not destroying serverless dataplane.')
if self.tenant_id and self.project_id:
self.destroy_project()
else:
logger.warn('No tenant ID or project ID found. Not destroying project.')
def deploy_serverless_dataplane(self):
nebula_config = self.infra_spec.direct_nebula
dapi_config = self.infra_spec.data_api
config = remove_nulls({
"provider": "aws",
"region": self.region,
'overRide': {
'couchbase': {
'image': self.options.capella_ami,
'version': self.options.capella_cb_version,
'specs': (
specs[0]
if (specs := self.construct_capella_server_groups(
self.infra_spec, self.node_list['clusters']
))
else None
)
},
'nebula': {
'image': self.options.nebula_ami,
'compute': {
'type': nebula_config.get('instance_type', None),
'count': {
'min': maybe_atoi(nebula_config.get('min_count', '')),
'max': maybe_atoi(nebula_config.get('max_count', '')),
'overRide': maybe_atoi(nebula_config.get('override_count', ''))
}
}
},
'dataApi': {
'image': self.options.dapi_ami,
'compute': {
'type': dapi_config.get('instance_type', None),
'count': {
'min': maybe_atoi(dapi_config.get('min_count', '')),
'max': maybe_atoi(dapi_config.get('max_count', '')),
'overRide': maybe_atoi(dapi_config.get('override_count', ''))
}
}
}
}
})
logger.info(pretty_dict(config))
resp = self.serverless_client.create_serverless_dataplane(config)
raise_for_status(resp)
dp_id = resp.json().get('dataplaneId')
logger.info('Initialised deployment for serverless dataplane {}'.format(dp_id))
logger.info('Saving dataplane ID to spec file.')
resp = self.serverless_client.get_dataplane_deployment_status(dp_id)
raise_for_status(resp)
status = resp.json()['status']['state']
self.cluster_id = resp.json()['couchbaseCluster']['id']
if not self.options.enable_autoscaling:
self.disable_autoscaling()
self.dp_id = dp_id
self.infra_spec.config.set('infrastructure', 'cbc_cluster', dp_id)
self.infra_spec.update_spec_file()
timeout_mins = self.options.capella_timeout
interval_secs = 30
status = None
t0 = time()
while (time() - t0) < timeout_mins * 60 and status != 'ready':
resp = self.serverless_client.get_dataplane_deployment_status(dp_id)
raise_for_status(resp)
status = resp.json()['status']['state']
logger.info('Dataplane state: {}'.format(status))
if status != 'ready':
sleep(interval_secs)
if status != 'ready':
logger.error('Deployment timed out after {} mins'.format(timeout_mins))
exit(1)
resp = self.serverless_client.get_serverless_dataplane_info(dp_id)
raise_for_status(resp)
logger.info('Deployed dataplane info: {}'.format(pretty_dict(resp.json())))
def disable_autoscaling(self):
logger.info('Creating deployment circuit breaker to prevent auto-scaling.')
resp = self.serverless_client.create_circuit_breaker(self.cluster_id)
raise_for_status(resp)
resp = self.serverless_client.get_circuit_breaker(self.cluster_id)
raise_for_status(resp)
logger.info('Circuit breaker created: {}'.format(pretty_dict(resp.json())))
def _create_db(self, name, width=1, weight=30):
logger.info('Adding new serverless DB: {}'.format(name))
data = {
'name': name,
'tenantId': self.tenant_id,
'projectId': self.project_id,
'provider': self.backend,
'region': self.region,
'overRide': {
'width': width,
'weight': weight,
'dataplaneId': self.dp_id
},
'dontImportSampleData': True
}
logger.info('DB configuration: {}'.format(pretty_dict(data)))
resp = self.serverless_client.create_serverless_database_overRide(data)
raise_for_status(resp)
return resp.json()
def _get_db_info(self, db_id):
resp = self.serverless_client.get_database_debug_info(db_id)
raise_for_status(resp)
return resp.json()
def create_serverless_dbs(self):
dbs = {}
if not (init_db_map := self.test_config.serverless_db.init_db_map):
init_db_map = {
'bucket-{}'.format(i+1): {'width': 1, 'weight': 30}
for i in range(self.test_config.cluster.num_buckets)
}
for db_name, params in init_db_map.items():
resp = self._create_db(db_name, params['width'], params['weight'])
db_id = resp['databaseId']
logger.info('Database ID for {}: {}'.format(db_name, db_id))
dbs[db_id] = {
'name': db_name,
'width': params['width'],
'weight': params['weight'],
'nebula_uri': None,
'dapi_uri': None,
'access': None,
'secret': None
}
self.test_config.serverless_db.update_db_map(dbs)
timeout_mins = 20
interval_secs = 10
t0 = time()
db_ids = list(dbs.keys())
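        # Poll each database until it reports 'ready', recording its Nebula and Data API endpoints as it completes.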
while db_ids and time() - t0 < timeout_mins * 60:
for db_id in db_ids:
db_info = self._get_db_info(db_id)['database']
db_state = db_info['status']['state']
logger.info('{} state: {}'.format(db_id, db_state))
if db_state == 'ready':
logger.info('Serverless DB deployed: {}'.format(db_id))
db_ids.remove(db_id)
dbs[db_id]['nebula_uri'] = db_info['connect']['sdk']
dbs[db_id]['dapi_uri'] = db_info['connect']['dataApi']
if db_ids:
sleep(interval_secs)
self.test_config.serverless_db.update_db_map(dbs)
if db_ids:
logger.error('Serverless DB deployment timed out after {} mins'.format(timeout_mins))
exit(1)
else:
logger.info('All serverless DBs deployed')
def update_spec(self):
db_id = self.test_config.buckets[0]
resp = self.serverless_client.get_database_debug_info(db_id)
raise_for_status(resp)
dp_info = resp.json()
logger.info('Dataplane config: {}'.format(pretty_dict(dp_info['dataplane'])))
resp = self.serverless_client.get_access_to_serverless_dataplane_nodes(self.dp_id)
raise_for_status(resp)
dp_creds = resp.json()
hostname = dp_info['dataplane']['couchbase']['nodes'][0]['hostname']
auth = (
dp_creds['couchbaseCreds']['username'],
dp_creds['couchbaseCreds']['password']
)
self.infra_spec.config.set('credentials', 'rest', ':'.join(auth).replace('%', '%%'))
default_pool = self.get_default_pool(hostname, auth)
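        # Build the node list for the spec file: strip the :8091 suffix, list 'kv' services first, and order nodes by server group.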
nodes = []
for node in default_pool['nodes']:
hostname = node['hostname'].removesuffix(':8091')
services = sorted(node['services'], key=lambda s: s != 'kv')
services_str = ','.join(services)
group = node['serverGroup'].removeprefix('group:')
nodes.append((hostname, services_str, group))
nodes = sorted(nodes, key=lambda n: n[2])
nodes = [':'.join(n) for n in sorted(nodes, key=lambda n: '' if 'kv' in n[1] else n[1])]
node_string = '\n'.join(nodes)
self.infra_spec.config.set('clusters', 'serverless', node_string)
self.infra_spec.update_spec_file()
def get_default_pool(self, hostname, auth):
session = requests.Session()
resp = session.get('https://{}:18091/pools/default'.format(hostname),
auth=auth, verify=False)
raise_for_status(resp)
return resp.json()
def create_project(self):
logger.info('Creating project for serverless DBs')
resp = self.dedicated_client.create_project(
self.tenant_id,
self.options.tag or 'perf-{}'.format(uuid4().hex[:6])
)
raise_for_status(resp)
project_id = resp.json()['id']
self.project_id = project_id
self.infra_spec.config.set('infrastructure', 'cbc_project', project_id)
self.infra_spec.update_spec_file()
logger.info('Project created: {}'.format(project_id))
def _destroy_db(self, db_id):
logger.info('Destroying serverless DB {}'.format(db_id))
resp = self.serverless_client.delete_database(self.tenant_id, self.project_id, db_id)
raise_for_status(resp)
logger.info('Serverless DB destroyed: {}'.format(db_id))
def destroy_serverless_databases(self):
logger.info('Deleting all serverless databases...')
for db_id in self.test_config.serverless_db.db_map:
self._destroy_db(db_id)
logger.info('All serverless databases destroyed.')
def destroy_serverless_dataplane(self):
logger.info('Deleting serverless dataplane...')
while (resp := self.serverless_client.delete_dataplane(self.dp_id)).status_code == 422:
logger.info("Waiting for databases to be fully deleted...")
sleep(5)
raise_for_status(resp)
logger.info('Serverless dataplane successfully queued for deletion.')
def destroy_project(self):
logger.info('Deleting project...')
resp = self.dedicated_client.delete_project(self.tenant_id, self.project_id)
raise_for_status(resp)
logger.info('Project successfully queued for deletion.')
class EKSTerraform(Terraform):
pass
# CLI args.
def get_args():
parser = ArgumentParser()
parser.add_argument('-c', '--cluster',
required=True,
                        help='the path to an infrastructure specification file')
parser.add_argument('--test-config',
required=False,
help='the path to the test configuration file')
parser.add_argument('--verbose',
action='store_true',
help='enable verbose logging')
parser.add_argument('-r', '--region',
choices=[
'us-east-1',
'us-east-2',
'us-west-2',
'ca-central-1',
'ap-northeast-1',
'ap-northeast-2',
'ap-southeast-1',
'ap-south-1',
'eu-west-1',
'eu-west-2',
'eu-west-3',
'eu-central-1',
'sa-east-1'
],
default='us-east-1',
help='the cloud region (AWS)')
parser.add_argument('-z', '--zone',
choices=[
'us-central1-a',
'us-central1-b',
                            'us-central1-c',
'us-central1-f',
'us-west1-a',
'us-west1-b',
'us-west1-c'
],
default='us-west1-b',
help='the cloud zone (GCP)')
parser.add_argument('--cluster-image',
help='Image/AMI name to use for cluster nodes')
parser.add_argument('--client-image',
help='Image/AMI name to use for client nodes')
parser.add_argument('--utility-image',
help='Image/AMI name to use for utility nodes')
parser.add_argument('--sgw-image',
help='Image/AMI name to use for sync gateway nodes')
parser.add_argument('--capella-public-api-url',
help='public API URL for Capella environment')
parser.add_argument('--capella-tenant',
help='tenant ID for Capella deployment')
parser.add_argument('--capella-project',
help='project ID for Capella deployment')
parser.add_argument('--capella-cb-version',
help='cb version to use for Capella deployment')
parser.add_argument('--capella-ami',
help='custom AMI to use for Capella deployment')
parser.add_argument('--dapi-ami',
help='AMI to use for Data API deployment (serverless)')
parser.add_argument('--dapi-override-count',
type=int,
help='number of DAPI nodes to deploy')
parser.add_argument('--dapi-min-count',
type=int,
help='minimum number of DAPI nodes in autoscaling group')
parser.add_argument('--dapi-max-count',
type=int,
help='maximum number of DAPI nodes in autoscaling group')
parser.add_argument('--dapi-instance-type',
help='instance type to use for DAPI nodes')
parser.add_argument('--nebula-ami',
help='AMI to use for Direct Nebula deployment (serverless)')
parser.add_argument('--nebula-override-count',
type=int,
help='number of Direct Nebula nodes to deploy')
parser.add_argument('--nebula-min-count',
type=int,
help='minimum number of Direct Nebula nodes in autoscaling group')
parser.add_argument('--nebula-max-count',
type=int,
help='maximum number of Direct Nebula nodes in autoscaling group')
parser.add_argument('--nebula-instance-type',
help='instance type to use for Direct Nebula nodes')
parser.add_argument('--vpc-peering',
action='store_true',
help='enable VPC peering for Capella deployment')
parser.add_argument('--capella-timeout',
type=int,
default=20,
help='Timeout (minutes) for Capella deployment when using internal API')
parser.add_argument('--enable-autoscaling',
action='store_true',
help='Enable cluster auto-scaling for serverless dataplanes. If not set, '
'a deployment circuit breaker will be created for the cluster.')
parser.add_argument('-t', '--tag',
help='Global tag for launched instances.')
parser.add_argument('override',
nargs='*',
help='custom cluster and/or test settings')
return parser.parse_args()
def destroy():
args = get_args()
infra_spec = ClusterSpec()
infra_spec.parse(fname=args.cluster, override=args.override)
if infra_spec.cloud_provider != 'capella':
deployer = Terraform(args)
elif infra_spec.serverless_infrastructure:
deployer = ServerlessTerraform(args)
else:
deployer = CapellaTerraform(args)
deployer.destroy()
def main():
args = get_args()
infra_spec = ClusterSpec()
infra_spec.parse(fname=args.cluster, override=args.override)
if infra_spec.cloud_provider != 'capella':
deployer = Terraform(args)
elif infra_spec.serverless_infrastructure:
deployer = ServerlessTerraform(args)
else:
deployer = CapellaTerraform(args)
deployer.deploy()
|
{
"content_hash": "31a18709a3bf189ca4c97a64f2dceee7",
"timestamp": "",
"source": "github",
"line_count": 1322,
"max_line_length": 100,
"avg_line_length": 39.80105900151286,
"alnum_prop": 0.5440637056464641,
"repo_name": "couchbase/perfrunner",
"id": "d24660071b897af8a1883f1c77614c413a60aed6",
"size": "52639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfrunner/utils/terraform.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1853"
},
{
"name": "Dockerfile",
"bytes": "2761"
},
{
"name": "Go",
"bytes": "37531"
},
{
"name": "Groovy",
"bytes": "46365"
},
{
"name": "HCL",
"bytes": "40219"
},
{
"name": "Inno Setup",
"bytes": "25281"
},
{
"name": "JavaScript",
"bytes": "14317"
},
{
"name": "Makefile",
"bytes": "2405"
},
{
"name": "Python",
"bytes": "2416900"
},
{
"name": "Ruby",
"bytes": "154"
},
{
"name": "Shell",
"bytes": "5016"
}
],
"symlink_target": ""
}
|
"""Projects manager (empower class)."""
|
{
"content_hash": "aec80fb9759d68e5015fd4a11aa72696",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.675,
"repo_name": "5g-empower/empower-runtime",
"id": "609791498a9a097db5573d1e450a37fb9ee501b9",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empower/managers/projectsmanager/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "77777"
},
{
"name": "JavaScript",
"bytes": "210914"
},
{
"name": "Python",
"bytes": "499223"
}
],
"symlink_target": ""
}
|
"""
Tests for Block Device utility functions.
"""
from nova import block_device
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import matchers
from nova.tests import uuidsentinel as uuids
class BlockDeviceTestCase(test.NoDBTestCase):
def setUp(self):
super(BlockDeviceTestCase, self).setUp()
BDM = block_device.BlockDeviceDict
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'volume_size': 1,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'volume_size': 10,
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'}),
]
def test_properties(self):
root_device0 = '/dev/sda'
root_device1 = '/dev/sdb'
mappings = [{'virtual': 'root',
'device': root_device0}]
properties0 = {'mappings': mappings}
properties1 = {'mappings': mappings,
'root_device_name': root_device1}
self.assertIsNone(block_device.properties_root_device_name({}))
self.assertEqual(root_device0,
block_device.properties_root_device_name(properties0))
self.assertEqual(root_device1,
block_device.properties_root_device_name(properties1))
def test_ephemeral(self):
self.assertFalse(block_device.is_ephemeral('ephemeral'))
self.assertTrue(block_device.is_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_ephemeral('ephemeral11'))
self.assertFalse(block_device.is_ephemeral('root'))
self.assertFalse(block_device.is_ephemeral('swap'))
self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
self.assertEqual(0, block_device.ephemeral_num('ephemeral0'))
self.assertEqual(1, block_device.ephemeral_num('ephemeral1'))
self.assertEqual(11, block_device.ephemeral_num('ephemeral11'))
self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
self.assertFalse(block_device.is_swap_or_ephemeral('root'))
self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
def test_mappings_prepend_dev(self):
mapping = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': 'sdb'},
{'virtual': 'swap', 'device': 'sdc'},
{'virtual': 'ephemeral1', 'device': 'sdd'},
{'virtual': 'ephemeral2', 'device': 'sde'}]
expected = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': '/dev/sdb'},
{'virtual': 'swap', 'device': '/dev/sdc'},
{'virtual': 'ephemeral1', 'device': '/dev/sdd'},
{'virtual': 'ephemeral2', 'device': '/dev/sde'}]
prepended = block_device.mappings_prepend_dev(mapping)
self.assertEqual(sorted(expected, key=lambda v: v['virtual']),
sorted(prepended, key=lambda v: v['virtual']))
def test_strip_dev(self):
self.assertEqual('sda', block_device.strip_dev('/dev/sda'))
self.assertEqual('sda', block_device.strip_dev('sda'))
self.assertIsNone(block_device.strip_dev(None))
def test_strip_prefix(self):
self.assertEqual('a', block_device.strip_prefix('/dev/sda'))
self.assertEqual('a', block_device.strip_prefix('a'))
self.assertEqual('a', block_device.strip_prefix('xvda'))
self.assertEqual('a', block_device.strip_prefix('vda'))
self.assertEqual('a', block_device.strip_prefix('hda'))
self.assertIsNone(block_device.strip_prefix(None))
def test_get_device_letter(self):
self.assertEqual('', block_device.get_device_letter(''))
self.assertEqual('a', block_device.get_device_letter('/dev/sda1'))
self.assertEqual('b', block_device.get_device_letter('/dev/xvdb'))
self.assertEqual('d', block_device.get_device_letter('/dev/d'))
self.assertEqual('a', block_device.get_device_letter('a'))
self.assertEqual('b', block_device.get_device_letter('sdb2'))
self.assertEqual('c', block_device.get_device_letter('vdc'))
self.assertEqual('c', block_device.get_device_letter('hdc'))
self.assertIsNone(block_device.get_device_letter(None))
def test_volume_in_mapping(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1},
{'num': 2,
'virtual_name': 'ephemeral2',
'device_name': '/dev/sdd',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
def _assert_volume_in_mapping(device_name, true_or_false):
in_mapping = block_device.volume_in_mapping(
device_name, block_device_info)
self.assertEqual(true_or_false, in_mapping)
_assert_volume_in_mapping('sda', False)
_assert_volume_in_mapping('sdb', True)
_assert_volume_in_mapping('sdc1', True)
_assert_volume_in_mapping('sdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
_assert_volume_in_mapping('sdh1', False)
def test_get_root_bdm(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vdd'}]
self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
self.assertIsNone(block_device.get_root_bdm([]))
def test_get_bdm_ephemeral_disk_size(self):
size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
self.assertEqual(10, size)
def test_get_bdm_swap_list(self):
swap_list = block_device.get_bdm_swap_list(self.new_mapping)
self.assertEqual(1, len(swap_list))
self.assertEqual(1, swap_list[0].get('id'))
def test_get_bdm_local_disk_num(self):
size = block_device.get_bdm_local_disk_num(self.new_mapping)
self.assertEqual(2, size)
def test_new_format_is_swap(self):
expected_results = [True, False, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_swap(bdm)
self.assertEqual(expected, res)
def test_new_format_is_ephemeral(self):
expected_results = [False, True, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_ephemeral(bdm)
self.assertEqual(expected, res)
def test_validate_device_name(self):
for value in [' ', 10, None, 'a' * 260]:
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_device_name,
value)
def test_validate_and_default_volume_size(self):
bdm = {}
for value in [-1, 'a', 2.5]:
bdm['volume_size'] = value
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_and_default_volume_size,
bdm)
def test_get_bdms_to_connect(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vde', 'boot_index': None},
{'device_name': 'vdd'}]
self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
exclude_root_mapping=True))
self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
class TestBlockDeviceDict(test.NoDBTestCase):
def setUp(self):
super(TestBlockDeviceDict, self).setUp()
BDM = block_device.BlockDeviceDict
self.api_mapping = [
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1},
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1},
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'boot_index': 0},
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'uuid': 'fake-snapshot-id-1',
'boot_index': -1},
{'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'}),
]
self.legacy_mapping = [
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'delete_on_termination': True,
'virtual_name': 'swap'},
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'delete_on_termination': True,
'virtual_name': 'ephemeral0'},
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}"},
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2'},
{'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping_source_image = [
BDM({'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda3',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3',
'boot_index': -1}),
BDM({'id': 7, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda4',
'source_type': 'image',
'destination_type': 'local',
'connection_info': "{'fake': 'connection_info'}",
'image_id': 'fake-image-id-2',
'boot_index': -1}),
]
self.legacy_mapping_source_image = [
{'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda3',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3'},
]
def test_init(self):
def fake_validate(obj, dct):
pass
self.stub_out('nova.block_device.BlockDeviceDict._fields',
set(['field1', 'field2']))
self.stub_out('nova.block_device.BlockDeviceDict._db_only_fields',
set(['db_field1', 'db_field2']))
self.stub_out('nova.block_device.BlockDeviceDict._validate',
fake_validate)
# Make sure db fields are not picked up if they are not
# in the original dict
dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
'field2': 'bar',
'db_field1': 'baz'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Make sure all expected fields are defaulted
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Unless they are not meant to be
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
do_not_default=set(['field2']))
self.assertIn('field1', dev_dict)
self.assertNotIn('field2', dev_dict)
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Passing kwargs to constructor works
dev_dict = block_device.BlockDeviceDict(field1='foo')
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
dev_dict = block_device.BlockDeviceDict(
{'field1': 'foo'}, field2='bar')
self.assertEqual('foo', dev_dict['field1'])
self.assertEqual('bar', dev_dict['field2'])
def test_init_prepend_dev_to_device_name(self):
bdm = {'id': 3, 'instance_uuid': uuids.instance,
'device_name': 'vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vda', bdm_dict['device_name'])
bdm['device_name'] = '/dev/vdb'
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vdb', bdm_dict['device_name'])
bdm['device_name'] = None
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertIsNone(bdm_dict['device_name'])
def test_init_boolify_delete_on_termination(self):
# Make sure that when delete_on_termination is not passed it's
# still set to False and not None
bdm = {'id': 3, 'instance_uuid': uuids.instance,
'device_name': 'vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertFalse(bdm_dict['delete_on_termination'])
def test_validate(self):
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
{'bogus_field': 'lame_val'})
lame_bdm = dict(self.new_mapping[2])
del lame_bdm['source_type']
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_bdm)
lame_bdm['no_device'] = True
block_device.BlockDeviceDict(lame_bdm)
lame_dev_bdm = dict(self.new_mapping[2])
lame_dev_bdm['device_name'] = "not a valid name"
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
lame_dev_bdm['device_name'] = ""
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
cool_volume_size_bdm = dict(self.new_mapping[2])
cool_volume_size_bdm['volume_size'] = '42'
cool_volume_size_bdm = block_device.BlockDeviceDict(
cool_volume_size_bdm)
self.assertEqual(42, cool_volume_size_bdm['volume_size'])
lame_volume_size_bdm = dict(self.new_mapping[2])
lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_volume_size_bdm)
truthy_bdm = dict(self.new_mapping[2])
truthy_bdm['delete_on_termination'] = '1'
truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
self.assertTrue(truthy_bdm['delete_on_termination'])
verbose_bdm = dict(self.new_mapping[2])
verbose_bdm['boot_index'] = 'first'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
verbose_bdm)
def test_from_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
block_device.BlockDeviceDict.from_legacy(legacy),
matchers.IsSubDictOf(new))
def test_from_legacy_mapping(self):
def _get_image_bdms(bdms):
return [bdm for bdm in bdms if bdm['source_type'] == 'image']
def _get_bootable_bdms(bdms):
return [bdm for bdm in bdms
if (bdm['boot_index'] is not None and
bdm['boot_index'] >= 0)]
new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
self.assertEqual(0, len(_get_image_bdms(new_no_img)))
for new, expected in zip(new_no_img, self.new_mapping):
self.assertThat(new, matchers.IsSubDictOf(expected))
new_with_img = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref')
image_bdms = _get_image_bdms(new_with_img)
boot_bdms = _get_bootable_bdms(new_with_img)
self.assertEqual(1, len(image_bdms))
self.assertEqual(1, len(boot_bdms))
self.assertEqual(0, image_bdms[0]['boot_index'])
self.assertEqual('image', boot_bdms[0]['source_type'])
new_with_img_and_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1')
image_bdms = _get_image_bdms(new_with_img_and_root)
boot_bdms = _get_bootable_bdms(new_with_img_and_root)
self.assertEqual(0, len(image_bdms))
self.assertEqual(1, len(boot_bdms))
self.assertEqual(0, boot_bdms[0]['boot_index'])
self.assertEqual('volume', boot_bdms[0]['source_type'])
new_no_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
self.assertEqual(0, len(_get_image_bdms(new_no_root)))
self.assertEqual(0, len(_get_bootable_bdms(new_no_root)))
def test_from_api(self):
for api, new in zip(self.api_mapping, self.new_mapping):
new['connection_info'] = None
if new['snapshot_id']:
new['volume_id'] = None
self.assertThat(
block_device.BlockDeviceDict.from_api(api, False),
matchers.IsSubDictOf(new))
def test_from_api_invalid_blank_id(self):
api_dict = {'id': 1,
'source_type': 'blank',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'delete_on_termination': True,
'boot_index': -1}
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api, api_dict,
False)
def test_from_api_invalid_source_to_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'uuid': 'fake-volume-id-1'}
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api, api_dict,
False)
def test_from_api_valid_source_to_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'uuid': 1,
'boot_index': 0}
retexp = block_device.BlockDeviceDict(
{'id': 1,
'source_type': 'image',
'image_id': 1,
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'boot_index': 0})
self.assertEqual(retexp,
block_device.BlockDeviceDict.from_api(api_dict, True))
def test_from_api_valid_source_to_local_mapping_with_string_bi(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'uuid': 1,
'boot_index': '0'}
retexp = block_device.BlockDeviceDict(
{'id': 1,
'source_type': 'image',
'image_id': 1,
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'boot_index': 0})
self.assertEqual(retexp,
block_device.BlockDeviceDict.from_api(api_dict, True))
def test_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
legacy,
matchers.IsSubDictOf(new.legacy()))
def test_legacy_mapping(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_source_image(self):
for legacy, new in zip(self.legacy_mapping_source_image,
self.new_mapping_source_image):
if new['destination_type'] == 'volume':
self.assertThat(legacy, matchers.IsSubDictOf(new.legacy()))
else:
self.assertRaises(exception.InvalidBDMForLegacy, new.legacy)
def test_legacy_mapping_source_image(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_mapping_from_object_list(self):
bdm1 = objects.BlockDeviceMapping()
bdm1 = objects.BlockDeviceMapping._from_db_object(
None, bdm1, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[0]))
bdm2 = objects.BlockDeviceMapping()
bdm2 = objects.BlockDeviceMapping._from_db_object(
None, bdm2, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[1]))
bdmlist = objects.BlockDeviceMappingList()
bdmlist.objects = [bdm1, bdm2]
block_device.legacy_mapping(bdmlist)
def test_image_mapping(self):
removed_fields = ['id', 'instance_uuid', 'connection_info',
'created_at', 'updated_at', 'deleted_at', 'deleted']
for bdm in self.new_mapping:
mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
bdm).get_image_mapping()
for fld in removed_fields:
self.assertNotIn(fld, mapping_bdm)
def _test_snapshot_from_bdm(self, template):
snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
self.assertEqual('new-snapshot-id', snapshot['snapshot_id'])
self.assertEqual('snapshot', snapshot['source_type'])
self.assertEqual('volume', snapshot['destination_type'])
self.assertEqual(template.volume_size, snapshot['volume_size'])
self.assertEqual(template.delete_on_termination,
snapshot['delete_on_termination'])
self.assertEqual(template.device_name, snapshot['device_name'])
for key in ['disk_bus', 'device_type', 'boot_index']:
self.assertEqual(template[key], snapshot[key])
def test_snapshot_from_bdm(self):
for bdm in self.new_mapping:
self._test_snapshot_from_bdm(objects.BlockDeviceMapping(**bdm))
def test_snapshot_from_object(self):
for bdm in self.new_mapping[:-1]:
obj = objects.BlockDeviceMapping()
obj = objects.BlockDeviceMapping._from_db_object(
None, obj, fake_block_device.FakeDbBlockDeviceDict(
bdm))
self._test_snapshot_from_bdm(obj)
|
{
"content_hash": "8bdbcf5b3d668a56fc02c1f214035f65",
"timestamp": "",
"source": "github",
"line_count": 657,
"max_line_length": 79,
"avg_line_length": 43.3455098934551,
"alnum_prop": 0.541821757145867,
"repo_name": "jianghuaw/nova",
"id": "859c43280b4c77084d4c0e553c995bcd1cd0e903",
"size": "29108",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/tests/unit/test_block_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
import synapseclient
import synapseclient.utils as utils
from synapseclient.exceptions import *
from synapseclient import Activity
from synapseclient import Project, Folder, File
from synapseclient import Evaluation, Submission, SubmissionStatus
from synapseclient import Wiki
from synapseclient import Column, Schema
from synapseclient.dict_object import DictObject
from synapseclient.annotations import from_submission_status_annotations
from collections import OrderedDict
from datetime import datetime, timedelta
from itertools import izip
from StringIO import StringIO
import copy
import argparse
import lock
import json
import math
import os
import random
import re
import sys
import tarfile
import tempfile
import time
import traceback
import urllib
import uuid
import warnings
try:
import challenge_config as conf
except Exception as ex1:
sys.stderr.write("\nPlease configure your challenge. See challenge_config.template.py for an example.\n\n")
raise ex1
import messages
# the batch size can be bigger, we do this just to demonstrate batching
BATCH_SIZE = 20
# how many times do we retry batch uploads of submission annotations
BATCH_UPLOAD_RETRY_COUNT = 5
UUID_REGEX = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
# A module level variable to hold the Synapse connection
syn = None
def to_column_objects(leaderboard_columns):
"""
    Turns a list of dictionaries of column configuration information (defined
    in conf.leaderboard_columns) into a list of Column objects
"""
column_keys = ['name', 'columnType', 'maximumSize', 'enumValues', 'defaultValue']
return [Column(**{ key: col[key] for key in column_keys if key in col}) for col in leaderboard_columns]
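# --- Illustrative sketch (not part of the original script) -------------------
# A minimal example of the column-configuration shape that to_column_objects()
# expects. The column names, types and sizes below are hypothetical; real
# values come from conf.leaderboard_columns in challenge_config.
def _example_to_column_objects():
    example_columns = [
        {'name': 'objectId', 'columnType': 'STRING', 'maximumSize': 20},
        {'name': 'score', 'columnType': 'DOUBLE', 'defaultValue': '0'},
    ]
    # Each dict becomes a synapseclient Column; keys outside column_keys are ignored.
    return to_column_objects(example_columns)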
def get_user_name(profile):
names = []
if 'firstName' in profile and profile['firstName'] and profile['firstName'].strip():
names.append(profile['firstName'])
if 'lastName' in profile and profile['lastName'] and profile['lastName'].strip():
names.append(profile['lastName'])
if len(names)==0:
names.append(profile['userName'])
return " ".join(names)
def update_submissions_status_batch(evaluation, statuses):
"""
Update statuses in batch. This can be much faster than individual updates,
especially in rank based scoring methods which recalculate scores for all
submissions each time a new submission is received.
"""
for retry in range(BATCH_UPLOAD_RETRY_COUNT):
try:
token = None
offset = 0
while offset < len(statuses):
batch = {"statuses" : statuses[offset:offset+BATCH_SIZE],
"isFirstBatch" : (offset==0),
"isLastBatch" : (offset+BATCH_SIZE>=len(statuses)),
"batchToken" : token}
response = syn.restPUT("/evaluation/%s/statusBatch" % evaluation.id, json.dumps(batch))
token = response.get('nextUploadToken', None)
offset += BATCH_SIZE
except SynapseHTTPError as err:
# on 412 ConflictingUpdateException we want to retry
if err.response.status_code == 412:
# sys.stderr.write('%s, retrying...\n' % err.message)
time.sleep(2)
else:
raise
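# --- Illustrative sketch (not part of the original script) -------------------
# One way a caller might use update_submissions_status_batch(): send every
# SCORED submission back to RECEIVED in a single batched request, mirroring
# what the 'reset' command below does one submission at a time. The status
# transition shown here is only an example.
def _example_batch_status_reset(evaluation):
    statuses = []
    for submission, status in syn.getSubmissionBundles(evaluation, status='SCORED'):
        status.status = 'RECEIVED'
        statuses.append(status)
    update_submissions_status_batch(evaluation, statuses)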
class Query(object):
"""
An object that helps with paging through annotation query results.
Also exposes properties totalNumberOfResults, headers and rows.
"""
def __init__(self, query, limit=20, offset=0):
self.query = query
self.limit = limit
self.offset = offset
self.fetch_batch_of_results()
def fetch_batch_of_results(self):
uri = "/evaluation/submission/query?query=" + urllib.quote_plus("%s limit %s offset %s" % (self.query, self.limit, self.offset))
results = syn.restGET(uri)
self.totalNumberOfResults = results['totalNumberOfResults']
self.headers = results['headers']
self.rows = results['rows']
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.rows):
if self.offset >= self.totalNumberOfResults:
raise StopIteration()
self.fetch_batch_of_results()
values = self.rows[self.i]['values']
self.i += 1
self.offset += 1
return values
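# --- Illustrative sketch (not part of the original script) -------------------
# Minimal use of the Query helper: page through every SCORED submission of an
# evaluation (the ID is supplied by the caller) and collect its objectId.
def _example_query_object_ids(evaluation_id):
    q = 'select objectId from evaluation_%s where status=="SCORED"' % evaluation_id
    results = Query(query=q)
    object_id_index = results.headers.index('objectId')
    return [row[object_id_index] for row in results]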
def validate(evaluation, dry_run=False):
if type(evaluation) != Evaluation:
evaluation = syn.getEvaluation(evaluation)
print "\n\nValidating", evaluation.id, evaluation.name
print "-" * 60
sys.stdout.flush()
for submission, status in syn.getSubmissionBundles(evaluation, status='RECEIVED'):
## refetch the submission so that we get the file path
## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
submission = syn.getSubmission(submission)
print "validating", submission.id, submission.name
try:
is_valid, validation_message = conf.validate_submission(evaluation, submission)
except Exception as ex1:
is_valid = False
print "Exception during validation:", type(ex1), ex1, ex1.message
traceback.print_exc()
validation_message = str(ex1)
status.status = "VALIDATED" if is_valid else "INVALID"
if not dry_run:
status = syn.store(status)
## send message AFTER storing status to ensure we don't get repeat messages
profile = syn.getUserProfile(submission.userId)
if is_valid:
messages.validation_passed(
userIds=[submission.userId],
username=get_user_name(profile),
queue_name=evaluation.name,
submission_id=submission.id,
submission_name=submission.name)
else:
messages.validation_failed(
userIds=[submission.userId],
username=get_user_name(profile),
queue_name=evaluation.name,
submission_id=submission.id,
submission_name=submission.name,
message=validation_message)
def score(evaluation, dry_run=False):
if type(evaluation) != Evaluation:
evaluation = syn.getEvaluation(evaluation)
print '\n\nScoring ', evaluation.id, evaluation.name
print "-" * 60
sys.stdout.flush()
for submission, status in syn.getSubmissionBundles(evaluation, status='VALIDATED'):
status.status = "INVALID"
## refetch the submission so that we get the file path
## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
submission = syn.getSubmission(submission)
try:
score, message = conf.score_submission(evaluation, submission)
print "scored:", submission.id, submission.name, submission.userId, score
## fill in team in submission status annotations
if 'teamId' in submission:
team = syn.restGET('/team/{id}'.format(id=submission.teamId))
if 'name' in team:
score['team'] = team['name']
else:
score['team'] = submission.teamId
elif 'userId' in submission:
profile = syn.getUserProfile(submission.userId)
score['team'] = get_user_name(profile)
else:
score['team'] = '?'
status.annotations = synapseclient.annotations.to_submission_status_annotations(score)
status.status = "SCORED"
## if there's a table configured, update it
if not dry_run and evaluation.id in conf.leaderboard_tables:
update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)
except Exception as ex1:
sys.stderr.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
st = StringIO()
traceback.print_exc(file=st)
sys.stderr.write(st.getvalue())
sys.stderr.write('\n')
message = st.getvalue()
if conf.ADMIN_USER_IDS:
submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+st.getvalue())
if not dry_run:
status = syn.store(status)
## send message AFTER storing status to ensure we don't get repeat messages
profile = syn.getUserProfile(submission.userId)
if status.status == 'SCORED':
messages.scoring_succeeded(
userIds=[submission.userId],
message=message,
username=get_user_name(profile),
queue_name=evaluation.name,
submission_name=submission.name,
submission_id=submission.id)
else:
messages.scoring_failed(
userIds=[submission.userId],
message=message,
username=get_user_name(profile),
queue_name=evaluation.name,
submission_name=submission.name,
submission_id=submission.id)
sys.stdout.write('\n')
def create_leaderboard_table(name, columns, parent, evaluation, dry_run=False):
if not dry_run:
        schema = syn.store(Schema(name=name, columns=columns, parent=parent))
for submission, status in syn.getSubmissionBundles(evaluation):
annotations = synapseclient.annotations.from_submission_status_annotations(status.annotations) if 'annotations' in status else {}
update_leaderboard_table(schema.id, submission, annotations, dry_run)
def update_leaderboard_table(leaderboard_table, submission, fields, dry_run=False):
"""
Insert or update a record in a leaderboard table for a submission.
:param fields: a dictionary including all scoring statistics plus the team name for the submission.
"""
## copy fields from submission
## fields should already contain scoring stats
fields['objectId'] = submission.id
fields['userId'] = submission.userId
fields['entityId'] = submission.entityId
fields['versionNumber'] = submission.versionNumber
fields['name'] = submission.name
results = syn.tableQuery("select * from %s where objectId=%s" % (leaderboard_table, submission.id), resultsAs="rowset")
rowset = results.asRowSet()
## figure out if we're inserting or updating
if len(rowset['rows']) == 0:
row = {'values':[]}
rowset['rows'].append(row)
mode = 'insert'
elif len(rowset['rows']) == 1:
row = rowset['rows'][0]
mode = 'update'
else:
## shouldn't happen
raise RuntimeError("Multiple entries in leaderboard table %s for submission %s" % (leaderboard_table,submission.id))
## build list of fields in proper order according to headers
row['values'] = [fields.get(col['name'], None) for col in rowset['headers']]
if dry_run:
print mode, "row "+row['rowId'] if 'rowId' in row else "new row", row['values']
else:
return syn.store(rowset)
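# --- Illustrative sketch (not part of the original script) -------------------
# Example call to update_leaderboard_table(): the fields dict carries the
# scoring statistics plus the team name, as described in the docstring. The
# table ID 'syn12345678' and the statistic names/values are hypothetical, and
# dry_run=True means the row is printed rather than stored.
def _example_update_leaderboard(submission):
    fields = {'score': 0.87, 'rank': 3, 'team': 'Example Team'}
    update_leaderboard_table('syn12345678', submission, fields, dry_run=True)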
def query(evaluation, columns, out=sys.stdout):
"""Test the query that will be run to construct the leaderboard"""
if type(evaluation) != Evaluation:
evaluation = syn.getEvaluation(evaluation)
## Note: Constructing the index on which the query operates is an
## asynchronous process, so we may need to wait a bit.
results = Query(query="select * from evaluation_%s where status==\"SCORED\"" % evaluation.id)
    ## annotate each column with its position in the query results, if it's there
cols = copy.deepcopy(columns)
for column in cols:
if column['name'] in results.headers:
column['index'] = results.headers.index(column['name'])
indices = [column['index'] for column in cols if 'index' in column]
column_index = {column['index']:column for column in cols if 'index' in column}
def column_to_string(row, column_index, i):
if column_index[i]['columnType']=="DOUBLE":
return "%0.6f"%float(row[i])
elif column_index[i]['columnType']=="STRING":
return "\"%s\""%unicode(row[i]).encode('utf-8')
else:
return unicode(row[i]).encode('utf-8')
## print leaderboard
out.write(",".join([column['name'] for column in cols if 'index' in column]) + "\n")
for row in results:
out.write(",".join(column_to_string(row, column_index, i) for i in indices))
out.write("\n")
def list_submissions(evaluation, status=None, **kwargs):
if isinstance(evaluation, basestring):
evaluation = syn.getEvaluation(evaluation)
print '\n\nSubmissions for: %s %s' % (evaluation.id, evaluation.name.encode('utf-8'))
print '-' * 60
for submission, status in syn.getSubmissionBundles(evaluation, status=status):
print submission.id, submission.createdOn, status.status, submission.name.encode('utf-8'), submission.userId
def list_evaluations(project):
print '\n\nEvaluations for project: ', utils.id_of(project)
print '-' * 60
evaluations = syn.getEvaluationByContentSource(project)
for evaluation in evaluations:
print "Evaluation: %s" % evaluation.id, evaluation.name.encode('utf-8')
def archive(evaluation, destination=None, name=None, query=None):
"""
Archive the submissions for the given evaluation queue and store them in the destination synapse folder.
:param evaluation: a synapse evaluation queue or its ID
:param destination: a synapse folder or its ID
:param query: a query that will return the desired submissions. At least the ID must be returned.
defaults to _select * from evaluation_[EVAL_ID] where status=="SCORED"_.
"""
tempdir = tempfile.mkdtemp()
archive_dirname = 'submissions_%s' % utils.id_of(evaluation)
if not query:
query = 'select * from evaluation_%s where status=="SCORED"' % utils.id_of(evaluation)
    ## for each submission, download its associated file and write a line of metadata
results = Query(query=query)
if 'objectId' not in results.headers:
raise ValueError("Can't find the required field \"objectId\" in the results of the query: \"{0}\"".format(query))
if not name:
name = 'submissions_%s.tgz' % utils.id_of(evaluation)
tar_path = os.path.join(tempdir, name)
print "creating tar at:", tar_path
print results.headers
with tarfile.open(tar_path, mode='w:gz') as archive:
with open(os.path.join(tempdir, 'submission_metadata.csv'), 'w') as f:
f.write( (','.join(hdr for hdr in (results.headers + ['filename'])) + '\n').encode('utf-8') )
for result in results:
## retrieve file into cache and copy it to destination
submission = syn.getSubmission(result[results.headers.index('objectId')])
prefixed_filename = submission.id + "_" + os.path.basename(submission.filePath)
archive.add(submission.filePath, arcname=os.path.join(archive_dirname, prefixed_filename))
line = (','.join(unicode(item) for item in (result+[prefixed_filename]))).encode('utf-8')
print line
f.write(line + '\n')
archive.add(
name=os.path.join(tempdir, 'submission_metadata.csv'),
arcname=os.path.join(archive_dirname, 'submission_metadata.csv'))
entity = syn.store(File(tar_path, parent=destination), evaluation_id=utils.id_of(evaluation))
print "created:", entity.id, entity.name
return entity.id
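# --- Illustrative sketch (not part of the original script) -------------------
# Example call to archive(): bundle every SCORED submission of an evaluation
# into a tarball stored under a destination folder. Both IDs are placeholders.
def _example_archive():
    return archive('9601234', destination='syn12345678', name='submissions_9601234.tgz')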
## ==================================================
## Handlers for commands
## ==================================================
def command_list(args):
"""
List either the submissions to an evaluation queue or
the evaluation queues associated with a given project.
"""
if args.all:
for queue_info in conf.evaluation_queues:
list_submissions(evaluation=queue_info['id'],
status=args.status)
elif args.challenge_project:
list_evaluations(project=args.challenge_project)
elif args.evaluation:
list_submissions(evaluation=args.evaluation,
status=args.status)
else:
list_evaluations(project=conf.CHALLENGE_SYN_ID)
def command_check_status(args):
submission = syn.getSubmission(args.submission)
status = syn.getSubmissionStatus(args.submission)
evaluation = syn.getEvaluation(submission.evaluationId)
## deleting the entity key is a hack to work around a bug which prevents
## us from printing a submission
del submission['entity']
print unicode(evaluation).encode('utf-8')
print unicode(submission).encode('utf-8')
print unicode(status).encode('utf-8')
def command_reset(args):
if args.rescore_all:
for queue_info in conf.evaluation_queues:
for submission, status in syn.getSubmissionBundles(queue_info['id'], status="SCORED"):
status.status = args.status
if not args.dry_run:
print unicode(syn.store(status)).encode('utf-8')
for submission in args.submission:
status = syn.getSubmissionStatus(submission)
status.status = args.status
if not args.dry_run:
print unicode(syn.store(status)).encode('utf-8')
def command_validate(args):
if args.all:
for queue_info in conf.evaluation_queues:
validate(queue_info['id'], dry_run=args.dry_run)
elif args.evaluation:
validate(args.evaluation, dry_run=args.dry_run)
else:
sys.stderr.write("\nValidate command requires either an evaluation ID or --all to validate all queues in the challenge")
def command_score(args):
if args.all:
for queue_info in conf.evaluation_queues:
score(queue_info['id'], dry_run=args.dry_run)
elif args.evaluation:
score(args.evaluation, dry_run=args.dry_run)
else:
sys.stderr.write("\Score command requires either an evaluation ID or --all to score all queues in the challenge")
def command_rank(args):
raise NotImplementedError('Implement a ranking function for your challenge')
def command_leaderboard(args):
## show columns specific to an evaluation, if available
leaderboard_cols = conf.leaderboard_columns.get(args.evaluation, conf.LEADERBOARD_COLUMNS)
## write out to file if --out args given
if args.out is not None:
with open(args.out, 'w') as f:
query(args.evaluation, columns=leaderboard_cols, out=f)
print "Wrote leaderboard out to:", args.out
else:
query(args.evaluation, columns=leaderboard_cols)
def command_archive(args):
archive(args.evaluation, args.destination, name=args.name, query=args.query)
## ==================================================
## main method
## ==================================================
def main():
if conf.CHALLENGE_SYN_ID == "":
sys.stderr.write("Please configure your challenge. See sample_challenge.py for an example.")
global syn
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user", help="UserName", default=None)
parser.add_argument("-p", "--password", help="Password", default=None)
parser.add_argument("--notifications", help="Send error notifications to challenge admins", action="store_true", default=False)
parser.add_argument("--send-messages", help="Send validation and scoring messages to participants", action="store_true", default=False)
parser.add_argument("--acknowledge-receipt", help="Send confirmation message on passing validation to participants", action="store_true", default=False)
parser.add_argument("--dry-run", help="Perform the requested command without updating anything in Synapse", action="store_true", default=False)
parser.add_argument("--debug", help="Show verbose error output from Synapse API calls", action="store_true", default=False)
subparsers = parser.add_subparsers(title="subcommand")
parser_list = subparsers.add_parser('list', help="List submissions to an evaluation or list evaluations")
parser_list.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
parser_list.add_argument("--challenge-project", "--challenge", "--project", metavar="SYNAPSE-ID", default=None)
parser_list.add_argument("-s", "--status", default=None)
parser_list.add_argument("--all", action="store_true", default=False)
parser_list.set_defaults(func=command_list)
parser_status = subparsers.add_parser('status', help="Check the status of a submission")
parser_status.add_argument("submission")
parser_status.set_defaults(func=command_check_status)
parser_reset = subparsers.add_parser('reset', help="Reset a submission to RECEIVED for re-scoring (or set to some other status)")
parser_reset.add_argument("submission", metavar="SUBMISSION-ID", type=int, nargs='*', help="One or more submission IDs, or omit if using --rescore-all")
parser_reset.add_argument("-s", "--status", default='RECEIVED')
parser_reset.add_argument("--rescore-all", action="store_true", default=False)
parser_reset.set_defaults(func=command_reset)
parser_validate = subparsers.add_parser('validate', help="Validate all RECEIVED submissions to an evaluation")
parser_validate.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None, )
parser_validate.add_argument("--all", action="store_true", default=False)
parser_validate.set_defaults(func=command_validate)
parser_score = subparsers.add_parser('score', help="Score all VALIDATED submissions to an evaluation")
parser_score.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
parser_score.add_argument("--all", action="store_true", default=False)
parser_score.set_defaults(func=command_score)
parser_rank = subparsers.add_parser('rank', help="Rank all SCORED submissions to an evaluation")
parser_rank.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
parser_rank.set_defaults(func=command_rank)
parser_archive = subparsers.add_parser('archive', help="Archive submissions to a challenge")
parser_archive.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
parser_archive.add_argument("destination", metavar="FOLDER-ID", default=None)
parser_archive.add_argument("-q", "--query", default=None)
parser_archive.add_argument("-n", "--name", default=None)
parser_archive.set_defaults(func=command_archive)
parser_leaderboard = subparsers.add_parser('leaderboard', help="Print the leaderboard for an evaluation")
parser_leaderboard.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
parser_leaderboard.add_argument("--out", default=None)
parser_leaderboard.set_defaults(func=command_leaderboard)
args = parser.parse_args()
print "\n" * 2, "=" * 75
print datetime.utcnow().isoformat()
## Acquire lock, don't run two scoring scripts at once
try:
update_lock = lock.acquire_lock_or_fail('challenge', max_age=timedelta(hours=4))
except lock.LockedException:
print u"Is the scoring script already running? Can't acquire lock."
# can't acquire lock, so return error code 75 which is a
# temporary error according to /usr/include/sysexits.h
return 75
try:
syn = synapseclient.Synapse(debug=args.debug)
if not args.user:
args.user = os.environ.get('SYNAPSE_USER', None)
if not args.password:
args.password = os.environ.get('SYNAPSE_PASSWORD', None)
syn.login(email=args.user, password=args.password)
## initialize messages
messages.syn = syn
messages.dry_run = args.dry_run
messages.send_messages = args.send_messages
messages.send_notifications = args.notifications
messages.acknowledge_receipt = args.acknowledge_receipt
args.func(args)
except Exception as ex1:
sys.stderr.write('Error in scoring script:\n')
st = StringIO()
traceback.print_exc(file=st)
sys.stderr.write(st.getvalue())
sys.stderr.write('\n')
if conf.ADMIN_USER_IDS:
messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=st.getvalue(), queue_name=conf.CHALLENGE_NAME)
finally:
update_lock.release()
print "\ndone: ", datetime.utcnow().isoformat()
print "=" * 75, "\n" * 2
if __name__ == '__main__':
main()
|
{
"content_hash": "a36ba3fb955b01e05c57a83f7154922b",
"timestamp": "",
"source": "github",
"line_count": 614,
"max_line_length": 158,
"avg_line_length": 40.4413680781759,
"alnum_prop": 0.6511215818936007,
"repo_name": "Sage-Bionetworks/U4CChallenge",
"id": "549c76c703cec834b9ff2f14d5f390ae44323ddd",
"size": "25359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/challenge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "24891"
},
{
"name": "Python",
"bytes": "59603"
},
{
"name": "R",
"bytes": "11499"
}
],
"symlink_target": ""
}
|
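# Demo (Fluent Python, ch09/methoddemo.py): a classmethod receives the class as
# its implicit first positional argument even when declared with a bare *args,
# while a staticmethod receives only the arguments passed explicitly.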
class Demo:
@classmethod
def klassmeth(*args):
return args
@staticmethod
def statmeth(*args):
return args
print(Demo.klassmeth())
print(Demo.klassmeth('spam'))
print(Demo.statmeth())
print(Demo.statmeth('spam'))
|
{
"content_hash": "f3ad14d94ccc47052202fa3e8b0520c7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 29,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.6598360655737705,
"repo_name": "stoneflyop1/fluent_py",
"id": "626c9fcf8c8783a790ebe902cfcc921054f5fbc8",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch09/methoddemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59464"
}
],
"symlink_target": ""
}
|
import asyncio
try:
import selectors
except ImportError:
import asyncio.selectors as selectors
import tkinter
import sys
if sys.platform == 'win32':
    raise ImportError('{} is not available on your platform'.format(__name__))
class _TkinterSelector(selectors._BaseSelectorImpl):
def __init__(self):
super().__init__()
self._tk = tkinter.Tk(useTk=0)
self._ready = []
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
mask = 0
if events & selectors.EVENT_READ:
mask |= tkinter.READABLE
if events & selectors.EVENT_WRITE:
mask |= tkinter.WRITABLE
def ready(fd, mask):
assert key.fd == fd
events = 0
if mask & tkinter.READABLE:
events |= selectors.EVENT_READ
if mask & tkinter.WRITABLE:
events |= selectors.EVENT_WRITE
self._ready.append((key, events))
self._tk.createfilehandler(key.fd, mask, ready)
return key
def unregister(self, fileobj):
key = super().unregister(fileobj)
self._tk.deletefilehandler(key.fd)
return key
def select(self, timeout=None):
self._ready = []
if timeout is not None:
timeout = int(timeout*1000)
token = self._tk.createtimerhandler(timeout, lambda: True)
self._tk.dooneevent()
if timeout is not None:
token.deletetimerhandler()
return self._ready
class TkinterEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
def new_event_loop(self):
try:
return self._loop_factory(selector=_TkinterSelector())
except TypeError:
raise Exception('The default event loop is not a selector event loop')
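# --- Illustrative sketch (not part of the original module) -------------------
# Minimal use of the policy: install it, then create an event loop through
# asyncio as usual so the loop is driven by the Tkinter selector above.
def _example_policy_usage():
    asyncio.set_event_loop_policy(TkinterEventLoopPolicy())
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    return loop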
|
{
"content_hash": "70ab631606f51db4c97f554f807817bb",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 82,
"avg_line_length": 31.482758620689655,
"alnum_prop": 0.6024096385542169,
"repo_name": "montag451/aiotkinter",
"id": "e66b3028cff211ef1f7333d4611c0e6fc5c6c697",
"size": "1826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiotkinter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2688"
}
],
"symlink_target": ""
}
|
'''Precise framerate calculation, scheduling and framerate limiting.
Measuring time
==============
The `tick` and `get_fps` functions can be used in conjunction to fulfil most
games' basic requirements::
from pyglet import clock
while True:
dt = clock.tick()
# ... update and render ...
print 'FPS is %f' % clock.get_fps()
The ``dt`` value returned gives the number of seconds (as a float) since the
last "tick".
The `get_fps` function averages the framerate over a sliding window of
approximately 1 second. (You can calculate the instantaneous framerate by
taking the reciprocal of ``dt``).
Always remember to `tick` the clock!
Limiting frame-rate
===================
The framerate can be limited::
clock.set_fps_limit(60)
This causes `clock` to sleep during each `tick` in an attempt to keep the
number of ticks (frames) per second below 60.
The implementation uses platform-dependent high-resolution sleep functions
to achieve better accuracy with busy-waiting than would be possible using
just the `time` module.
Scheduling
==========
You can schedule a function to be called every time the clock is ticked::
def callback(dt):
print '%f seconds since last callback' % dt
clock.schedule(callback)
The `schedule_interval` method causes a function to be called every "n"
seconds::
clock.schedule_interval(callback, .5) # called twice a second
The `schedule_once` method causes a function to be called once "n" seconds
in the future::
clock.schedule_once(callback, 5) # called in 5 seconds
All of the `schedule` methods will pass on any additional args or keyword args
you specify to the callback function::
def animate(dt, velocity, sprite):
sprite.position += dt * velocity
clock.schedule(animate, velocity=5.0, sprite=alien)
You can cancel a function scheduled with any of these methods using
`unschedule`::
clock.unschedule(animate)
Displaying FPS
==============
The ClockDisplay class provides a simple FPS counter. You should create
an instance of ClockDisplay once during the application's start up::
fps_display = clock.ClockDisplay()
Call draw on the ClockDisplay object for each frame::
fps_display.draw()
There are several options to change the font, color and text displayed
within the __init__ method.
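For example, the format string and update interval can be overridden
(the values shown here are illustrative only)::
    fps_display = clock.ClockDisplay(format='FPS: %(fps).1f', interval=0.5)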
Using multiple clocks
=====================
The clock functions are all relayed to an instance of `Clock` which is
initialised with the module. You can get this instance to use directly::
clk = clock.get_default()
You can also replace the default clock with your own:
myclk = clock.Clock()
clock.set_default(myclk)
Each clock maintains its own set of scheduled functions and FPS
limiting/measurement. Each clock must be "ticked" separately.
Multiple and derived clocks potentially allow you to separate "game-time" and
"wall-time", or to synchronise your clock to an audio or video stream instead
of the system clock.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import time
import ctypes
import pyglet.lib
from pyglet import compat_platform
if compat_platform in ('win32', 'cygwin'):
# Win32 Sleep function is only 10-millisecond resolution, so instead
# use a waitable timer object, which has up to 100-nanosecond resolution
# (hardware and implementation dependent, of course).
_kernel32 = ctypes.windll.kernel32
class _ClockBase(object):
def __init__(self):
self._timer = _kernel32.CreateWaitableTimerA(None, True, None)
def sleep(self, microseconds):
delay = ctypes.c_longlong(int(-microseconds * 10))
_kernel32.SetWaitableTimer(self._timer, ctypes.byref(delay),
0, ctypes.c_void_p(), ctypes.c_void_p(), False)
_kernel32.WaitForSingleObject(self._timer, 0xffffffff)
_default_time_function = time.clock
else:
_c = pyglet.lib.load_library('c')
_c.usleep.argtypes = [ctypes.c_ulong]
class _ClockBase(object):
def sleep(self, microseconds):
_c.usleep(int(microseconds))
_default_time_function = time.time
class _ScheduledItem(object):
__slots__ = ['func', 'args', 'kwargs']
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
class _ScheduledIntervalItem(object):
__slots__ = ['func', 'interval', 'last_ts', 'next_ts',
'args', 'kwargs']
def __init__(self, func, interval, last_ts, next_ts, args, kwargs):
self.func = func
self.interval = interval
self.last_ts = last_ts
self.next_ts = next_ts
self.args = args
self.kwargs = kwargs
def _dummy_schedule_func(*args, **kwargs):
'''Dummy function that does nothing, placed onto zombie scheduled items
    to ensure they have no side effects if they are already queued inside the
    tick() method.
'''
pass
class Clock(_ClockBase):
'''Class for calculating and limiting framerate, and for calling scheduled
functions.
'''
#: The minimum amount of time in seconds this clock will attempt to sleep
#: for when framerate limiting. Higher values will increase the
#: accuracy of the limiting but also increase CPU usage while
#: busy-waiting. Lower values mean the process sleeps more often, but is
#: prone to over-sleep and run at a potentially lower or uneven framerate
#: than desired.
MIN_SLEEP = 0.005
#: The amount of time in seconds this clock subtracts from sleep values
#: to compensate for lazy operating systems.
SLEEP_UNDERSHOOT = MIN_SLEEP - 0.001
# List of functions to call every tick.
_schedule_items = None
# List of schedule interval items kept in sort order.
_schedule_interval_items = None
# If True, a sleep(0) is inserted on every tick.
_force_sleep = False
def __init__(self, fps_limit=None, time_function=_default_time_function):
'''Initialise a Clock, with optional framerate limit and custom
time function.
:Parameters:
`fps_limit` : float
If not None, the maximum allowable framerate. Defaults
to None. Deprecated in pyglet 1.2.
`time_function` : function
Function to return the elapsed time of the application,
in seconds. Defaults to time.time, but can be replaced
to allow for easy time dilation effects or game pausing.
'''
super(Clock, self).__init__()
self.time = time_function
self.next_ts = self.time()
self.last_ts = None
self.times = []
self.set_fps_limit(fps_limit)
self.cumulative_time = 0
self._schedule_items = []
self._schedule_interval_items = []
def update_time(self):
'''Get the elapsed time since the last call to `update_time`.
This updates the clock's internal measure of time and returns
the difference since the last update (or since the clock was created).
:since: pyglet 1.2
:rtype: float
:return: The number of seconds since the last `update_time`, or 0
if this was the first time it was called.
'''
ts = self.time()
if self.last_ts is None:
delta_t = 0
else:
delta_t = ts - self.last_ts
self.times.insert(0, delta_t)
if len(self.times) > self.window_size:
self.cumulative_time -= self.times.pop()
self.cumulative_time += delta_t
self.last_ts = ts
return delta_t
def call_scheduled_functions(self, dt):
'''Call scheduled functions that elapsed on the last `update_time`.
:since: pyglet 1.2
:Parameters:
dt : float
The elapsed time since the last update to pass to each
scheduled function. This is *not* used to calculate which
functions have elapsed.
:rtype: bool
:return: True if any functions were called, otherwise False.
'''
ts = self.last_ts
result = False
# Call functions scheduled for every frame
        # Dupe list just in case one of the items unschedules itself
for item in list(self._schedule_items):
result = True
item.func(dt, *item.args, **item.kwargs)
# Call all scheduled interval functions and reschedule for future.
need_resort = False
        # Dupe list just in case one of the items unschedules itself
for item in list(self._schedule_interval_items):
if item.next_ts > ts:
break
result = True
item.func(ts - item.last_ts, *item.args, **item.kwargs)
if item.interval:
# Try to keep timing regular, even if overslept this time;
# but don't schedule in the past (which could lead to
                # infinitely-worsening error).
item.next_ts = item.last_ts + item.interval
item.last_ts = ts
if item.next_ts <= ts:
if ts - item.next_ts < 0.05:
# Only missed by a little bit, keep the same schedule
item.next_ts = ts + item.interval
else:
# Missed by heaps, do a soft reschedule to avoid
# lumping everything together.
item.next_ts = self._get_soft_next_ts(ts, item.interval)
# Fake last_ts to avoid repeatedly over-scheduling in
# future. Unfortunately means the next reported dt is
# incorrect (looks like interval but actually isn't).
item.last_ts = item.next_ts - item.interval
need_resort = True
else:
item.next_ts = None
# Remove finished one-shots.
self._schedule_interval_items = \
[item for item in self._schedule_interval_items \
if item.next_ts is not None]
if need_resort:
# TODO bubble up changed items might be faster
self._schedule_interval_items.sort(key=lambda a: a.next_ts)
return result
def tick(self, poll=False):
'''Signify that one frame has passed.
This will call any scheduled functions that have elapsed.
:Parameters:
`poll` : bool
If True, the function will call any scheduled functions
but will not sleep or busy-wait for any reason. Recommended
for advanced applications managing their own sleep timers
only.
Since pyglet 1.1.
:rtype: float
:return: The number of seconds since the last "tick", or 0 if this was
the first frame.
'''
if poll:
if self.period_limit:
self.next_ts = self.next_ts + self.period_limit
else:
if self.period_limit:
self._limit()
if self._force_sleep:
self.sleep(0)
delta_t = self.update_time()
self.call_scheduled_functions(delta_t)
return delta_t
def _limit(self):
'''Sleep until the next frame is due. Called automatically by
`tick` if a framerate limit has been set.
This method uses several heuristics to determine whether to
sleep or busy-wait (or both).
'''
ts = self.time()
# Sleep to just before the desired time
sleeptime = self.get_sleep_time(False)
while sleeptime - self.SLEEP_UNDERSHOOT > self.MIN_SLEEP:
self.sleep(1000000 * (sleeptime - self.SLEEP_UNDERSHOOT))
sleeptime = self.get_sleep_time(False)
# Busy-loop CPU to get closest to the mark
sleeptime = self.next_ts - self.time()
while sleeptime > 0:
sleeptime = self.next_ts - self.time()
if sleeptime < -2 * self.period_limit:
# Missed the time by a long shot, let's reset the clock
# print >> sys.stderr, 'Step %f' % -sleeptime
self.next_ts = ts + 2 * self.period_limit
else:
# Otherwise keep the clock steady
self.next_ts = self.next_ts + self.period_limit
def get_sleep_time(self, sleep_idle):
'''Get the time until the next item is scheduled.
This method considers all scheduled items and the current
``fps_limit``, if any.
Applications can choose to continue receiving updates at the
maximum framerate during idle time (when no functions are scheduled),
or they can sleep through their idle time and allow the CPU to
switch to other processes or run in low-power mode.
If `sleep_idle` is ``True`` the latter behaviour is selected, and
``None`` will be returned if there are no scheduled items.
Otherwise, if `sleep_idle` is ``False``, a sleep time allowing
the maximum possible framerate (considering ``fps_limit``) will
be returned; or an earlier time if a scheduled function is ready.
:Parameters:
`sleep_idle` : bool
If True, the application intends to sleep through its idle
time; otherwise it will continue ticking at the maximum
frame rate allowed.
:rtype: float
:return: Time until the next scheduled event in seconds, or ``None``
if there is no event scheduled.
:since: pyglet 1.1
'''
if self._schedule_items or not sleep_idle:
if not self.period_limit:
return 0.
else:
wake_time = self.next_ts
if self._schedule_interval_items:
wake_time = min(wake_time,
self._schedule_interval_items[0].next_ts)
return max(wake_time - self.time(), 0.)
if self._schedule_interval_items:
return max(self._schedule_interval_items[0].next_ts - self.time(),
0)
return None
def set_fps_limit(self, fps_limit):
'''Set the framerate limit.
The framerate limit applies only when a function is scheduled
for every frame. That is, the framerate limit can be exceeded by
scheduling a function for a very small period of time.
:Parameters:
`fps_limit` : float
Maximum frames per second allowed, or None to disable
limiting.
:deprecated: Use `pyglet.app.run` and `schedule_interval` instead.
'''
if not fps_limit:
self.period_limit = None
else:
self.period_limit = 1. / fps_limit
self.window_size = fps_limit or 60
def get_fps_limit(self):
'''Get the framerate limit.
:rtype: float
:return: The framerate limit previously set in the constructor or
            `set_fps_limit`, or 0 if no limit was set.
'''
if self.period_limit:
return 1. / self.period_limit
else:
return 0
def get_fps(self):
'''Get the average FPS of recent history.
The result is the average of a sliding window of the last "n" frames,
where "n" is some number designed to cover approximately 1 second.
:rtype: float
:return: The measured frames per second.
'''
if not self.cumulative_time:
return 0
return len(self.times) / self.cumulative_time
def schedule(self, func, *args, **kwargs):
'''Schedule a function to be called every frame.
The function should have a prototype that includes ``dt`` as the
first argument, which gives the elapsed time, in seconds, since the
last clock tick. Any additional arguments given to this function
are passed on to the callback::
def callback(dt, *args, **kwargs):
pass
:Parameters:
`func` : function
The function to call each frame.
'''
item = _ScheduledItem(func, args, kwargs)
self._schedule_items.append(item)
def _schedule_item(self, func, last_ts, next_ts, interval, *args, **kwargs):
item = _ScheduledIntervalItem(
func, interval, last_ts, next_ts, args, kwargs)
# Insert in sort order
for i, other in enumerate(self._schedule_interval_items):
if other.next_ts is not None and other.next_ts > next_ts:
self._schedule_interval_items.insert(i, item)
break
else:
self._schedule_interval_items.append(item)
def schedule_interval(self, func, interval, *args, **kwargs):
'''Schedule a function to be called every `interval` seconds.
Specifying an interval of 0 prevents the function from being
called again (see `schedule` to call a function as often as possible).
The callback function prototype is the same as for `schedule`.
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
last_ts = self.last_ts or self.next_ts
# Schedule from now, unless now is sufficiently close to last_ts, in
# which case use last_ts. This clusters together scheduled items that
# probably want to be scheduled together. The old (pre 1.1.1)
# behaviour was to always use self.last_ts, and not look at ts. The
# new behaviour is needed because clock ticks can now be quite
# irregular, and span several seconds.
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = last_ts + interval
self._schedule_item(func, last_ts, next_ts, interval, *args, **kwargs)
def schedule_interval_soft(self, func, interval, *args, **kwargs):
'''Schedule a function to be called every `interval` seconds,
beginning at a time that does not coincide with other scheduled
events.
This method is similar to `schedule_interval`, except that the
clock will move the interval out of phase with other scheduled
        functions so as to distribute CPU load more evenly over time.
This is useful for functions that need to be called regularly,
but not relative to the initial start time. `pyglet.media`
does this for scheduling audio buffer updates, which need to occur
regularly -- if all audio updates are scheduled at the same time
(for example, mixing several tracks of a music score, or playing
multiple videos back simultaneously), the resulting load on the
CPU is excessive for those intervals but idle outside. Using
the soft interval scheduling, the load is more evenly distributed.
Soft interval scheduling can also be used as an easy way to schedule
graphics animations out of phase; for example, multiple flags
waving in the wind.
:since: pyglet 1.1
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
last_ts = self.last_ts or self.next_ts
# See schedule_interval
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = self._get_soft_next_ts(last_ts, interval)
last_ts = next_ts - interval
self._schedule_item(func, last_ts, next_ts, interval, *args, **kwargs)
def _get_soft_next_ts(self, last_ts, interval):
def taken(ts, e):
'''Return True if the given time has already got an item
scheduled nearby.
'''
for item in self._schedule_interval_items:
if item.next_ts is None:
pass
elif abs(item.next_ts - ts) <= e:
return True
elif item.next_ts > ts + e:
return False
return False
# Binary division over interval:
#
# 0 interval
# |--------------------------|
# 5 3 6 2 7 4 8 1 Order of search
#
# i.e., first scheduled at interval,
# then at interval/2
# then at interval/4
# then at interval*3/4
# then at ...
#
# Schedule is hopefully then evenly distributed for any interval,
# and any number of scheduled functions.
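        # Worked illustration (hypothetical values): with last_ts=0 and
        # interval=1.0 the candidates tried are 1.0, then 0.5, then
        # 0.25, 0.5, 0.75, then 0.125 ... 0.875, returning the first slot
        # with no other item already scheduled within dt/4 of it.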
next_ts = last_ts + interval
if not taken(next_ts, interval / 4):
return next_ts
dt = interval
divs = 1
while True:
next_ts = last_ts
for i in range(divs - 1):
next_ts += dt
if not taken(next_ts, dt / 4):
return next_ts
dt /= 2
divs *= 2
# Avoid infinite loop in pathological case
if divs > 16:
return next_ts
def schedule_once(self, func, delay, *args, **kwargs):
'''Schedule a function to be called once after `delay` seconds.
The callback function prototype is the same as for `schedule`.
:Parameters:
`func` : function
The function to call when the timer lapses.
`delay` : float
The number of seconds to wait before the timer lapses.
'''
last_ts = self.last_ts or self.next_ts
# See schedule_interval
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = last_ts + delay
self._schedule_item(func, last_ts, next_ts, 0, *args, **kwargs)
def unschedule(self, func):
'''Remove a function from the schedule.
If the function appears in the schedule more than once, all occurrences
are removed. If the function was not scheduled, no error is raised.
:Parameters:
`func` : function
The function to remove from the schedule.
'''
# First replace zombie items' func with a dummy func that does
# nothing, in case the list has already been cloned inside tick().
# (Fixes issue 326).
for item in self._schedule_items:
if item.func == func:
item.func = _dummy_schedule_func
for item in self._schedule_interval_items:
if item.func == func:
item.func = _dummy_schedule_func
# Now remove matching items from both schedule lists.
self._schedule_items = \
[item for item in self._schedule_items \
if item.func is not _dummy_schedule_func]
self._schedule_interval_items = \
[item for item in self._schedule_interval_items \
if item.func is not _dummy_schedule_func]
# Default clock.
_default = Clock()
def set_default(default):
'''Set the default clock to use for all module-level functions.
By default an instance of `Clock` is used.
:Parameters:
`default` : `Clock`
The default clock to use.
'''
global _default
_default = default
def get_default():
'''Return the `Clock` instance that is used by all module-level
clock functions.
:rtype: `Clock`
:return: The default clock.
'''
return _default
def tick(poll=False):
'''Signify that one frame has passed on the default clock.
This will call any scheduled functions that have elapsed.
:Parameters:
`poll` : bool
If True, the function will call any scheduled functions
but will not sleep or busy-wait for any reason. Recommended
for advanced applications managing their own sleep timers
only.
Since pyglet 1.1.
:rtype: float
:return: The number of seconds since the last "tick", or 0 if this was the
first frame.
'''
return _default.tick(poll)
def get_sleep_time(sleep_idle):
'''Get the time until the next item is scheduled on the default clock.
See `Clock.get_sleep_time` for details.
:Parameters:
`sleep_idle` : bool
If True, the application intends to sleep through its idle
time; otherwise it will continue ticking at the maximum
frame rate allowed.
:rtype: float
:return: Time until the next scheduled event in seconds, or ``None``
if there is no event scheduled.
:since: pyglet 1.1
'''
return _default.get_sleep_time(sleep_idle)
def get_fps():
'''Return the current measured FPS of the default clock.
:rtype: float
'''
return _default.get_fps()
def set_fps_limit(fps_limit):
'''Set the framerate limit for the default clock.
:Parameters:
`fps_limit` : float
Maximum frames per second allowed, or None to disable
limiting.
:deprecated: Use `pyglet.app.run` and `schedule_interval` instead.
'''
_default.set_fps_limit(fps_limit)
def get_fps_limit():
'''Get the framerate limit for the default clock.
    :return: The framerate limit previously set by `set_fps_limit`, or 0 if
        no limit was set.
'''
return _default.get_fps_limit()
def schedule(func, *args, **kwargs):
'''Schedule 'func' to be called every frame on the default clock.
The arguments passed to func are ``dt``, followed by any ``*args`` and
``**kwargs`` given here.
:Parameters:
`func` : function
The function to call each frame.
'''
_default.schedule(func, *args, **kwargs)
def schedule_interval(func, interval, *args, **kwargs):
'''Schedule 'func' to be called every 'interval' seconds on the default
clock.
The arguments passed to 'func' are 'dt' (time since last function call),
followed by any ``*args`` and ``**kwargs`` given here.
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
_default.schedule_interval(func, interval, *args, **kwargs)
def schedule_interval_soft(func, interval, *args, **kwargs):
'''Schedule 'func' to be called every 'interval' seconds on the default
clock, beginning at a time that does not coincide with other scheduled
events.
The arguments passed to 'func' are 'dt' (time since last function call),
followed by any ``*args`` and ``**kwargs`` given here.
:see: `Clock.schedule_interval_soft`
:since: pyglet 1.1
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
_default.schedule_interval_soft(func, interval, *args, **kwargs)
def schedule_once(func, delay, *args, **kwargs):
'''Schedule 'func' to be called once after 'delay' seconds (can be
a float) on the default clock. The arguments passed to 'func' are
'dt' (time since last function call), followed by any ``*args`` and
``**kwargs`` given here.
If no default clock is set, the func is queued and will be scheduled
on the default clock as soon as it is created.
:Parameters:
`func` : function
The function to call when the timer lapses.
`delay` : float
The number of seconds to wait before the timer lapses.
'''
_default.schedule_once(func, delay, *args, **kwargs)
def unschedule(func):
'''Remove 'func' from the default clock's schedule. No error
is raised if the func was never scheduled.
:Parameters:
`func` : function
The function to remove from the schedule.
'''
_default.unschedule(func)
class ClockDisplay(object):
'''Display current clock values, such as FPS.
This is a convenience class for displaying diagnostics such as the
framerate. See the module documentation for example usage.
:Ivariables:
`label` : `pyglet.font.Text`
The label which is displayed.
:deprecated: This class presents values that are often misleading, as
they reflect the rate of clock ticks, not displayed framerate. Use
pyglet.window.FPSDisplay instead.
'''
def __init__(self,
font=None,
interval=0.25,
format='%(fps).2f',
color=(.5, .5, .5, .5),
clock=None):
'''Create a ClockDisplay.
All parameters are optional. By default, a large translucent
font will be used to display the FPS to two decimal places.
:Parameters:
`font` : `pyglet.font.Font`
The font to format text in.
`interval` : float
The number of seconds between updating the display.
`format` : str
A format string describing the format of the text. This
string is modulated with the dict ``{'fps' : fps}``.
`color` : 4-tuple of float
The color, including alpha, passed to ``glColor4f``.
`clock` : `Clock`
The clock which determines the time. If None, the default
clock is used.
'''
if clock is None:
clock = _default
self.clock = clock
self.clock.schedule_interval(self.update_text, interval)
if not font:
from pyglet.font import load as load_font
font = load_font('', 36, bold=True)
import pyglet.font
self.label = pyglet.font.Text(font, '', color=color, x=10, y=10)
self.format = format
def unschedule(self):
'''Remove the display from its clock's schedule.
`ClockDisplay` uses `Clock.schedule_interval` to periodically update
its display label. Even if the ClockDisplay is not being used any
more, its update method will still be scheduled, which can be a
resource drain. Call this method to unschedule the update method
and allow the ClockDisplay to be garbage collected.
:since: pyglet 1.1
'''
self.clock.unschedule(self.update_text)
def update_text(self, dt=0):
'''Scheduled method to update the label text.'''
fps = self.clock.get_fps()
self.label.text = self.format % {'fps': fps}
def draw(self):
'''Method called each frame to render the label.'''
self.label.draw()
def test_clock():
import getopt
import sys
test_seconds = 1
test_fps = 60
show_fps = False
options, args = getopt.getopt(sys.argv[1:], 'vht:f:',
['time=', 'fps=', 'help'])
for key, value in options:
if key in ('-t', '--time'):
test_seconds = float(value)
elif key in ('-f', '--fps'):
test_fps = float(value)
        elif key in ('-v',):
show_fps = True
elif key in ('-h', '--help'):
print ('Usage: clock.py <options>\n'
'\n'
'Options:\n'
' -t --time Number of seconds to run for.\n'
' -f --fps Target FPS.\n'
'\n'
'Tests the clock module by measuring how close we can\n'
'get to the desired FPS by sleeping and busy-waiting.')
sys.exit(0)
set_fps_limit(test_fps)
start = time.time()
# Add one because first frame has no update interval.
n_frames = int(test_seconds * test_fps + 1)
print('Testing %f FPS for %f seconds...' % (test_fps, test_seconds))
for i in range(n_frames):
tick()
if show_fps:
print(get_fps())
total_time = time.time() - start
total_error = total_time - test_seconds
print('Total clock error: %f secs' % total_error)
print('Total clock error / secs: %f secs/secs' % \
(total_error / test_seconds))
# Not fair to add the extra frame in this calc, since no-one's interested
# in the startup situation.
print('Average FPS: %f' % ((n_frames - 1) / total_time))
if __name__ == '__main__':
test_clock()
|
{
"content_hash": "0d18666d061ed63ec51b7f9eed244c26",
"timestamp": "",
"source": "github",
"line_count": 958,
"max_line_length": 80,
"avg_line_length": 34.233820459290186,
"alnum_prop": 0.5934565190876936,
"repo_name": "AustinRoy7/Pomodoro-timer",
"id": "e51db2bbcd674978d6487ed4a123094332fb072a",
"size": "34515",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/pyglet/clock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1236"
},
{
"name": "C",
"bytes": "409362"
},
{
"name": "C++",
"bytes": "129981"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "8777284"
},
{
"name": "Tcl",
"bytes": "1285363"
}
],
"symlink_target": ""
}
|
from base import *
import telnetlib
from nsenter import Namespace
class QuaggaTelnetDaemon(object):
TELNET_PASSWORD = "zebra"
TELNET_PORT = 2605
def __init__(self, ctn):
self.ns = Namespace(ctn.get_pid(), 'net')
def __enter__(self):
self.ns.__enter__()
self.tn = telnetlib.Telnet('127.0.0.1', self.TELNET_PORT)
self.tn.read_until("Password: ")
self.tn.write(self.TELNET_PASSWORD + "\n")
self.tn.write("enable\n")
self.tn.read_until('bgpd#')
return self.tn
def __exit__(self, type, value, traceback):
self.tn.close()
self.ns.__exit__(type, value, traceback)
class QuaggaBGPContainer(BGPContainer):
WAIT_FOR_BOOT = 1
SHARED_VOLUME = '/etc/quagga'
def __init__(self, name, asn, router_id, ctn_image_name='osrg/quagga', zebra=False):
super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
ctn_image_name)
self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
self.zebra = zebra
def run(self):
super(QuaggaBGPContainer, self).run()
return self.WAIT_FOR_BOOT
def get_global_rib(self, prefix='', rf='ipv4'):
rib = []
if prefix != '':
return self.get_global_rib_with_prefix(prefix, rf)
with QuaggaTelnetDaemon(self) as tn:
tn.write('show bgp {0} unicast\n'.format(rf))
tn.read_until(' Network Next Hop Metric '
'LocPrf Weight Path')
read_next = False
for line in tn.read_until('bgpd#').split('\n'):
if line[:2] == '*>':
line = line[2:]
ibgp = False
if line[0] == 'i':
line = line[1:]
ibgp = True
elif not read_next:
continue
elems = line.split()
if len(elems) == 1:
read_next = True
prefix = elems[0]
continue
elif read_next:
nexthop = elems[0]
else:
prefix = elems[0]
nexthop = elems[1]
read_next = False
rib.append({'prefix': prefix, 'nexthop': nexthop,
'ibgp': ibgp})
return rib
def get_global_rib_with_prefix(self, prefix, rf):
rib = []
with QuaggaTelnetDaemon(self) as tn:
tn.write('show bgp {0} unicast {1}\n'.format(rf, prefix))
lines = [line.strip() for line in tn.read_until('bgpd#').split('\n')]
            lines.pop(0) # throw away the first line, which contains 'show bgp...'
if lines[0] == '% Network not in table':
return rib
lines = lines[2:]
if lines[0].startswith('Not advertised'):
lines.pop(0) # another useless line
elif lines[0].startswith('Advertised to non peer-group peers:'):
lines = lines[2:] # other useless lines
else:
raise Exception('unknown output format {0}'.format(lines))
aspath = [int(asn) for asn in lines[0].split()]
nexthop = lines[1].split()[0].strip()
info = [s.strip(',') for s in lines[2].split()]
attrs = []
if 'metric' in info:
med = info[info.index('metric') + 1]
attrs.append({'type': BGP_ATTR_TYPE_MULTI_EXIT_DISC, 'metric': int(med)})
if 'localpref' in info:
localpref = info[info.index('localpref') + 1]
attrs.append({'type': BGP_ATTR_TYPE_LOCAL_PREF, 'value': int(localpref)})
rib.append({'prefix': prefix, 'nexthop': nexthop,
'aspath': aspath, 'attrs': attrs})
return rib
def get_neighbor_state(self, peer):
if peer not in self.peers:
            raise Exception('peer {0} not found'.format(peer.router_id))
neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]
with QuaggaTelnetDaemon(self) as tn:
tn.write('show bgp neighbors\n')
neighbor_info = []
curr_info = []
for line in tn.read_until('bgpd#').split('\n'):
line = line.strip()
if line.startswith('BGP neighbor is'):
neighbor_info.append(curr_info)
curr_info = []
curr_info.append(line)
neighbor_info.append(curr_info)
for info in neighbor_info:
if not info[0].startswith('BGP neighbor is'):
continue
idx1 = info[0].index('BGP neighbor is ')
idx2 = info[0].index(',')
n_addr = info[0][idx1+len('BGP neighbor is '):idx2]
if n_addr == neigh_addr:
idx1 = info[2].index('= ')
state = info[2][idx1+len('= '):]
if state.startswith('Idle'):
return BGP_FSM_IDLE
elif state.startswith('Active'):
return BGP_FSM_ACTIVE
elif state.startswith('Established'):
return BGP_FSM_ESTABLISHED
else:
return state
        raise Exception('peer {0} not found'.format(peer.router_id))
def send_route_refresh(self):
with QuaggaTelnetDaemon(self) as tn:
tn.write('clear ip bgp * soft\n')
#tn.read_until('bgpd#')
def create_config(self):
self._create_config_bgp()
if self.zebra:
self._create_config_zebra()
def _create_config_bgp(self):
c = CmdBuffer()
c << 'hostname bgpd'
c << 'password zebra'
c << 'router bgp {0}'.format(self.asn)
c << 'bgp router-id {0}'.format(self.router_id)
if any(info['graceful_restart'] for info in self.peers.itervalues()):
c << 'bgp graceful-restart'
version = 4
for peer, info in self.peers.iteritems():
version = netaddr.IPNetwork(info['neigh_addr']).version
n_addr = info['neigh_addr'].split('/')[0]
if version == 6:
c << 'no bgp default ipv4-unicast'
c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
if info['is_rs_client']:
c << 'neighbor {0} route-server-client'.format(n_addr)
for name, policy in info['policies'].iteritems():
direction = policy['direction']
c << 'neighbor {0} route-map {1} {2}'.format(n_addr, name,
direction)
if info['passwd']:
c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
if info['passive']:
c << 'neighbor {0} passive'.format(n_addr)
if version == 6:
c << 'address-family ipv6 unicast'
c << 'neighbor {0} activate'.format(n_addr)
c << 'exit-address-family'
for route in self.routes.itervalues():
if route['rf'] == 'ipv4':
c << 'network {0}'.format(route['prefix'])
elif route['rf'] == 'ipv6':
c << 'address-family ipv6 unicast'
c << 'network {0}'.format(route['prefix'])
c << 'exit-address-family'
else:
                raise Exception('unsupported route family: {0}'.format(route['rf']))
if self.zebra:
if version == 6:
c << 'address-family ipv6 unicast'
c << 'redistribute connected'
c << 'exit-address-family'
else:
c << 'redistribute connected'
for name, policy in self.policies.iteritems():
c << 'access-list {0} {1} {2}'.format(name, policy['type'],
policy['match'])
c << 'route-map {0} permit 10'.format(name)
c << 'match ip address {0}'.format(name)
c << 'set metric {0}'.format(policy['med'])
c << 'debug bgp as4'
c << 'debug bgp fsm'
c << 'debug bgp updates'
c << 'debug bgp events'
c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)
with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new config]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
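    # Illustrative only (all values are hypothetical): for asn=65001,
    # router_id='192.168.0.1' and one IPv4 peer at 10.0.0.2 in AS 65002,
    # the generated bgpd.conf begins roughly as:
    #
    #   hostname bgpd
    #   password zebra
    #   router bgp 65001
    #   bgp router-id 192.168.0.1
    #   neighbor 10.0.0.2 remote-as 65002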
def _create_config_zebra(self):
c = CmdBuffer()
c << 'hostname zebra'
c << 'password zebra'
c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
c << 'debug zebra packet'
c << 'debug zebra kernel'
c << 'debug zebra rib'
c << ''
with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new config]'.format(self.name))
print colors.yellow(indent(str(c)))
f.writelines(str(c))
def reload_config(self):
daemon = []
daemon.append('bgpd')
if self.zebra:
daemon.append('zebra')
for d in daemon:
cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
self.local(cmd)
|
{
"content_hash": "7fa1836f340f128788d9ae966fea87fd",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 89,
"avg_line_length": 37.77290836653386,
"alnum_prop": 0.4880286889568611,
"repo_name": "h-naoto/gobgp",
"id": "e76c7a8b18bd2c359e7bed9e5444032aa0eba31a",
"size": "10094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/lib/quagga.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1118283"
},
{
"name": "Protocol Buffer",
"bytes": "12091"
},
{
"name": "Python",
"bytes": "366622"
},
{
"name": "Shell",
"bytes": "6672"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
from future.utils import with_metaclass
standard_library.install_aliases() # NOQA
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import warnings
from cached_property import cached_property
import chainer
from chainer import cuda
from chainer import functions as F
import numpy as np
class ActionValue(with_metaclass(ABCMeta, object)):
"""Struct that holds state-fixed Q-functions and its subproducts.
Every operation it supports is done in a batch manner.
"""
@abstractproperty
def greedy_actions(self):
"""Get argmax_a Q(s,a)."""
raise NotImplementedError()
@abstractproperty
def max(self):
"""Evaluate max Q(s,a)."""
raise NotImplementedError()
@abstractmethod
def evaluate_actions(self, actions):
"""Evaluate Q(s,a) with a = given actions."""
raise NotImplementedError()
@abstractproperty
def params(self):
"""Learnable parameters of this action value.
Returns:
tuple of chainer.Variable
"""
raise NotImplementedError()
class DiscreteActionValue(ActionValue):
"""Q-function output for discrete action space.
Args:
q_values (ndarray or chainer.Variable):
Array of Q values whose shape is (batchsize, n_actions)
"""
def __init__(self, q_values, q_values_formatter=lambda x: x):
assert isinstance(q_values, chainer.Variable)
self.xp = cuda.get_array_module(q_values.array)
self.q_values = q_values
self.n_actions = q_values.array.shape[1]
self.q_values_formatter = q_values_formatter
@cached_property
def greedy_actions(self):
return chainer.Variable(
self.q_values.array.argmax(axis=1).astype(np.int32))
@cached_property
def max(self):
with chainer.force_backprop_mode():
return F.select_item(self.q_values, self.greedy_actions)
def evaluate_actions(self, actions):
return F.select_item(self.q_values, actions)
def compute_advantage(self, actions):
return self.evaluate_actions(actions) - self.max
def compute_double_advantage(self, actions, argmax_actions):
return (self.evaluate_actions(actions) -
self.evaluate_actions(argmax_actions))
def compute_expectation(self, beta):
return F.sum(F.softmax(beta * self.q_values) * self.q_values, axis=1)
def __repr__(self):
return 'DiscreteActionValue greedy_actions:{} q_values:{}'.format(
self.greedy_actions.array,
self.q_values_formatter(self.q_values.array))
@property
def params(self):
return (self.q_values,)
def __getitem__(self, i):
return DiscreteActionValue(
self.q_values[i], q_values_formatter=self.q_values_formatter)
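# Hedged usage sketch, not part of the library; the (2, 3) batch shape and the
# all-zero Q-values below are illustrative only.
#
#     q = chainer.Variable(np.zeros((2, 3), dtype=np.float32))
#     av = DiscreteActionValue(q)
#     av.greedy_actions   # Variable of int32 action indices, shape (2,)
#     av.max              # Variable of the corresponding Q-values, shape (2,)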
class DistributionalDiscreteActionValue(ActionValue):
"""distributional Q-function output for discrete action space.
Args:
q_dist (chainer.Variable): Probabilities of atoms. Its shape must be
(batchsize, n_actions, n_atoms).
z_values (ndarray): Values represented by atoms.
Its shape must be (n_atoms,).
"""
def __init__(self, q_dist, z_values, q_values_formatter=lambda x: x):
assert isinstance(q_dist, chainer.Variable)
assert not isinstance(z_values, chainer.Variable)
assert q_dist.ndim == 3
assert z_values.ndim == 1
assert q_dist.shape[2] == z_values.shape[0]
self.xp = cuda.get_array_module(q_dist.array)
self.z_values = z_values
self.q_values = F.sum(F.scale(q_dist, self.z_values, axis=2), axis=2)
self.q_dist = q_dist
self.n_actions = q_dist.array.shape[1]
self.q_values_formatter = q_values_formatter
@cached_property
def greedy_actions(self):
return chainer.Variable(
self.q_values.array.argmax(axis=1).astype(np.int32))
@cached_property
def max(self):
with chainer.force_backprop_mode():
return F.select_item(self.q_values, self.greedy_actions)
@cached_property
def max_as_distribution(self):
"""Return the return distributions of the greedy actions.
Returns:
chainer.Variable: Return distributions. Its shape will be
(batch_size, n_atoms).
"""
with chainer.force_backprop_mode():
return self.q_dist[self.xp.arange(self.q_values.shape[0]),
self.greedy_actions.array]
def evaluate_actions(self, actions):
return F.select_item(self.q_values, actions)
def evaluate_actions_as_distribution(self, actions):
"""Return the return distributions of given actions.
Args:
actions (chainer.Variable or ndarray): Array of action indices.
Its shape must be (batch_size,).
Returns:
chainer.Variable: Return distributions. Its shape will be
(batch_size, n_atoms).
"""
return self.q_dist[self.xp.arange(self.q_values.shape[0]), actions]
def compute_advantage(self, actions):
return self.evaluate_actions(actions) - self.max
def compute_double_advantage(self, actions, argmax_actions):
return (self.evaluate_actions(actions) -
self.evaluate_actions(argmax_actions))
def compute_expectation(self, beta):
return F.sum(F.softmax(beta * self.q_values) * self.q_values, axis=1)
def __repr__(self):
return 'DistributionalDiscreteActionValue greedy_actions:{} q_values:{}'.format( # NOQA
self.greedy_actions.array,
self.q_values_formatter(self.q_values.array))
@property
def params(self):
return (self.q_dist,)
def __getitem__(self, i):
return DistributionalDiscreteActionValue(
self.q_dist[i],
self.z_values,
q_values_formatter=self.q_values_formatter,
)
class QuantileDiscreteActionValue(DiscreteActionValue):
"""Quantile action value for discrete actions.
Args:
quantiles (chainer.Variable): (batch_size, n_taus, n_actions)
q_values_formatter (callable):
"""
def __init__(self, quantiles, q_values_formatter=lambda x: x):
assert quantiles.ndim == 3
self.quantiles = quantiles
self.xp = cuda.get_array_module(quantiles.array)
self.n_actions = quantiles.shape[2]
self.q_values_formatter = q_values_formatter
@cached_property
def q_values(self):
with chainer.force_backprop_mode():
return F.mean(self.quantiles, axis=1)
def evaluate_actions_as_quantiles(self, actions):
"""Return the return quantiles of given actions.
Args:
actions (chainer.Variable or ndarray): Array of action indices.
Its shape must be (batch_size,).
Returns:
chainer.Variable: Return quantiles. Its shape will be
(batch_size, n_taus).
"""
if isinstance(actions, chainer.Variable):
actions = actions.array
return self.quantiles[
self.xp.arange(self.quantiles.shape[0]), :, actions]
def __repr__(self):
return 'QuantileDiscreteActionValue greedy_actions:{} q_values:{}'.format( # NOQA
self.greedy_actions.array,
self.q_values_formatter(self.q_values.array))
@property
def params(self):
return (self.quantiles,)
def __getitem__(self, i):
return QuantileDiscreteActionValue(
quantiles=self.quantiles[i],
q_values_formatter=self.q_values_formatter,
)
class QuadraticActionValue(ActionValue):
"""Q-function output for continuous action space.
See: http://arxiv.org/abs/1603.00748
Define a Q(s,a) with A(s,a) in a quadratic form.
    Q(s,a) = V(s) + A(s,a)
A(s,a) = -1/2 (u - mu(s))^T P(s) (u - mu(s))
Args:
mu (chainer.Variable): mu(s), actions that maximize A(s,a)
mat (chainer.Variable): P(s), coefficient matrices of A(s,a).
It must be positive definite.
v (chainer.Variable): V(s), values of s
        min_action (ndarray): minimum action, not batched
max_action (ndarray): maximum action, not batched
"""
def __init__(self, mu, mat, v, min_action=None, max_action=None):
self.xp = cuda.get_array_module(mu.array)
self.mu = mu
self.mat = mat
self.v = v
if min_action is None:
self.min_action = None
else:
self.min_action = self.xp.asarray(min_action, dtype=np.float32)
if max_action is None:
self.max_action = None
else:
self.max_action = self.xp.asarray(max_action, dtype=np.float32)
self.batch_size = self.mu.array.shape[0]
@cached_property
def greedy_actions(self):
with chainer.force_backprop_mode():
a = self.mu
if self.min_action is not None:
a = F.maximum(
self.xp.broadcast_to(self.min_action, a.array.shape), a)
if self.max_action is not None:
a = F.minimum(
self.xp.broadcast_to(self.max_action, a.array.shape), a)
return a
@cached_property
def max(self):
with chainer.force_backprop_mode():
if self.min_action is None and self.max_action is None:
return F.reshape(self.v, (self.batch_size,))
else:
return self.evaluate_actions(self.greedy_actions)
def evaluate_actions(self, actions):
u_minus_mu = actions - self.mu
a = - 0.5 * \
F.matmul(F.matmul(
u_minus_mu[:, None, :], self.mat),
u_minus_mu[:, :, None])[:, 0, 0]
return a + F.reshape(self.v, (self.batch_size,))
def compute_advantage(self, actions):
return self.evaluate_actions(actions) - self.max
def compute_double_advantage(self, actions, argmax_actions):
return (self.evaluate_actions(actions) -
self.evaluate_actions(argmax_actions))
def __repr__(self):
return 'QuadraticActionValue greedy_actions:{} v:{}'.format(
self.greedy_actions.array, self.v.array)
@property
def params(self):
return (self.mu, self.mat, self.v)
def __getitem__(self, i):
return QuadraticActionValue(
self.mu[i],
self.mat[i],
self.v[i],
min_action=self.min_action,
max_action=self.max_action,
)
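# Hedged sketch, not part of the library: for a one-dimensional action space
# the quadratic advantage reduces to A(s, a) = -0.5 * P * (a - mu) ** 2, so
# with the illustrative values mu=0.0, P=2.0, v=1.0 and a=0.5 one gets
# Q(s, 0.5) = 1.0 - 0.5 * 2.0 * 0.25 = 0.75.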
class SingleActionValue(ActionValue):
"""ActionValue that can evaluate only a single action."""
def __init__(self, evaluator, maximizer=None):
self.evaluator = evaluator
self.maximizer = maximizer
@cached_property
def greedy_actions(self):
with chainer.force_backprop_mode():
return self.maximizer()
@cached_property
def max(self):
with chainer.force_backprop_mode():
return self.evaluator(self.greedy_actions)
def evaluate_actions(self, actions):
return self.evaluator(actions)
def compute_advantage(self, actions):
return self.evaluator(actions) - self.max
def compute_double_advantage(self, actions, argmax_actions):
return (self.evaluate_actions(actions) -
self.evaluate_actions(argmax_actions))
def __repr__(self):
return 'SingleActionValue'
@property
def params(self):
warnings.warn(
'SingleActionValue has no learnable parameters until it'
' is evaluated on some action. If you want to draw a computation'
' graph that outputs SingleActionValue, use the variable returned'
' by its method such as evaluate_actions instead.')
return ()
def __getitem__(self, i):
raise NotImplementedError
|
{
"content_hash": "4e62b4f77d13c91f6e48be07406cdc47",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 96,
"avg_line_length": 32.580213903743314,
"alnum_prop": 0.6165777595404186,
"repo_name": "toslunar/chainerrl",
"id": "61f1d217f36f2579d00beacfa20e0ef6c9730e5d",
"size": "12185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainerrl/action_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "837028"
},
{
"name": "Shell",
"bytes": "11208"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
import nlp_playground.path
from nlp_playground.data import resolve_data_filename
@contextmanager
def swap_base_path(new_path):
"""
Temporarily changes the base path for unit tests.
"""
nlp_playground.path.CUSTOM_PATH = new_path
yield
nlp_playground.path.CUSTOM_PATH = None
def test_resolve_data_filename():
""" Test resolve_data_filename() """
with swap_base_path('/base'):
expected = '/base/data/module/module.data'
actual = resolve_data_filename('module')
assert expected == actual
|
{
"content_hash": "e8ae9d67a6650bb9695ee513ace9bc25",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 53,
"avg_line_length": 26.5,
"alnum_prop": 0.6895368782161235,
"repo_name": "jamesmishra/nlp-playground",
"id": "1d11643ff3553bcabd90fa29cd33425e5eb4df8a",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlp_playground/tests/test_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76418"
},
{
"name": "Shell",
"bytes": "2459"
}
],
"symlink_target": ""
}
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import os
from os import path
import shutil
import codecs
import random
import plac
import re
import spacy.util
from spacy.en import English
from spacy.tagger import Tagger
from spacy.syntax.util import Config
from spacy.gold import read_json_file
from spacy.gold import GoldParse
from spacy.scorer import Scorer
def score_model(scorer, nlp, raw_text, annot_tuples):
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
else:
tokens = nlp.tokenizer(raw_text)
nlp.tagger(tokens)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold)
def _merge_sents(sents):
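    # Merge per-sentence annotation tuples into a single document-level tuple,
    # offsetting token ids, heads and bracket indices by the running token
    # count so that indices remain consistent across sentence boundaries.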
m_deps = [[], [], [], [], [], []]
m_brackets = []
i = 0
for (ids, words, tags, heads, labels, ner), brackets in sents:
m_deps[0].extend(id_ + i for id_ in ids)
m_deps[1].extend(words)
m_deps[2].extend(tags)
m_deps[3].extend(head + i for head in heads)
m_deps[4].extend(labels)
m_deps[5].extend(ner)
m_brackets.extend((b['first'] + i, b['last'] + i, b['label']) for b in brackets)
i += len(ids)
return [(m_deps, m_brackets)]
def train(Language, gold_tuples, model_dir, n_iter=15, feat_set=u'basic',
seed=0, gold_preproc=False, n_sents=0, corruption_level=0,
beam_width=1, verbose=False,
use_orig_arc_eager=False):
if n_sents > 0:
gold_tuples = gold_tuples[:n_sents]
templates = Tagger.default_templates()
nlp = Language(data_dir=model_dir, tagger=False)
nlp.tagger = Tagger.blank(nlp.vocab, templates)
print("Itn.\tP.Loss\tUAS\tNER F.\tTag %\tToken %")
for itn in range(n_iter):
scorer = Scorer()
loss = 0
for raw_text, sents in gold_tuples:
if gold_preproc:
raw_text = None
else:
sents = _merge_sents(sents)
for annot_tuples, ctnt in sents:
words = annot_tuples[1]
gold_tags = annot_tuples[2]
score_model(scorer, nlp, raw_text, annot_tuples)
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(words)
else:
tokens = nlp.tokenizer(raw_text)
loss += nlp.tagger.train(tokens, gold_tags)
random.shuffle(gold_tuples)
print('%d:\t%d\t%.3f\t%.3f\t%.3f\t%.3f' % (itn, loss, scorer.uas, scorer.ents_f,
scorer.tags_acc,
scorer.token_acc))
nlp.end_training(model_dir)
def evaluate(Language, gold_tuples, model_dir, gold_preproc=False, verbose=False,
beam_width=None):
nlp = Language(data_dir=model_dir)
if beam_width is not None:
nlp.parser.cfg.beam_width = beam_width
scorer = Scorer()
for raw_text, sents in gold_tuples:
if gold_preproc:
raw_text = None
else:
sents = _merge_sents(sents)
for annot_tuples, brackets in sents:
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.entity(tokens)
nlp.parser(tokens)
else:
tokens = nlp(raw_text, merge_mwes=False)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=verbose)
return scorer
def write_parses(Language, dev_loc, model_dir, out_loc, beam_width=None):
nlp = Language(data_dir=model_dir)
if beam_width is not None:
nlp.parser.cfg.beam_width = beam_width
gold_tuples = read_json_file(dev_loc)
scorer = Scorer()
out_file = codecs.open(out_loc, 'w', 'utf8')
for raw_text, sents in gold_tuples:
sents = _merge_sents(sents)
for annot_tuples, brackets in sents:
if raw_text is None:
tokens = nlp.tokenizer.tokens_from_list(annot_tuples[1])
nlp.tagger(tokens)
nlp.entity(tokens)
nlp.parser(tokens)
else:
tokens = nlp(raw_text, merge_mwes=False)
gold = GoldParse(tokens, annot_tuples)
scorer.score(tokens, gold, verbose=False)
for t in tokens:
out_file.write(
'%s\t%s\t%s\t%s\n' % (t.orth_, t.tag_, t.head.orth_, t.dep_)
)
return scorer
@plac.annotations(
train_loc=("Location of training file or directory"),
dev_loc=("Location of development file or directory"),
model_dir=("Location of output model directory",),
eval_only=("Skip training, and only evaluate", "flag", "e", bool),
corruption_level=("Amount of noise to add to training data", "option", "c", float),
gold_preproc=("Use gold-standard sentence boundaries in training?", "flag", "g", bool),
out_loc=("Out location", "option", "o", str),
n_sents=("Number of training sentences", "option", "n", int),
n_iter=("Number of training iterations", "option", "i", int),
verbose=("Verbose error reporting", "flag", "v", bool),
debug=("Debug mode", "flag", "d", bool),
)
def main(train_loc, dev_loc, model_dir, n_sents=0, n_iter=15, out_loc="", verbose=False,
debug=False, corruption_level=0.0, gold_preproc=False, eval_only=False):
if not eval_only:
gold_train = list(read_json_file(train_loc))
train(English, gold_train, model_dir,
feat_set='basic' if not debug else 'debug',
gold_preproc=gold_preproc, n_sents=n_sents,
corruption_level=corruption_level, n_iter=n_iter,
verbose=verbose)
#if out_loc:
# write_parses(English, dev_loc, model_dir, out_loc, beam_width=beam_width)
scorer = evaluate(English, list(read_json_file(dev_loc)),
model_dir, gold_preproc=gold_preproc, verbose=verbose)
print('TOK', scorer.token_acc)
print('POS', scorer.tags_acc)
print('UAS', scorer.uas)
print('LAS', scorer.las)
print('NER P', scorer.ents_p)
print('NER R', scorer.ents_r)
print('NER F', scorer.ents_f)
if __name__ == '__main__':
plac.call(main)
|
{
"content_hash": "a15453ad5cc1c465e7be35a89b4c5edc",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 91,
"avg_line_length": 36.35057471264368,
"alnum_prop": 0.5829249011857708,
"repo_name": "lukw00/spaCy",
"id": "9cd8cc011eaf47dcd7a95f5ab1836f5f48571cda",
"size": "6347",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "bin/tagger/train.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "315320"
},
{
"name": "CSS",
"bytes": "39174"
},
{
"name": "Groff",
"bytes": "188349"
},
{
"name": "HTML",
"bytes": "545617"
},
{
"name": "JavaScript",
"bytes": "9925"
},
{
"name": "Makefile",
"bytes": "91656"
},
{
"name": "PostScript",
"bytes": "460967"
},
{
"name": "Python",
"bytes": "433394"
},
{
"name": "Shell",
"bytes": "96067"
}
],
"symlink_target": ""
}
|
"""Tests for compute service."""
import base64
import contextlib
import datetime
import operator
import sys
import testtools
import time
import traceback
import uuid
import mock
import mox
from oslo.config import cfg
from oslo import messaging
import six
from testtools import matchers as testtools_matchers
import nova
from nova import availability_zones
from nova import block_device
from nova import compute
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import manager as conductor_manager
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import compute_node as compute_node_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_group as instance_group_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import quota
from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_instance_actions
from nova.tests import fake_network
from nova.tests import fake_network_cache_model
from nova.tests import fake_notifier
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.objects import test_flavor
from nova.tests.objects import test_migration
from nova.tests.objects import test_network
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import event
from nova.virt import fake
from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
def get_primitive_instance_by_uuid(context, instance_uuid):
"""Helper method to get an instance and then convert it to
a primitive form using jsonutils.
"""
instance = db.instance_get_by_uuid(context, instance_uuid)
return jsonutils.to_primitive(instance)
def unify_instance(instance):
"""Return a dict-like instance for both object-initiated and
model-initiated sources that can reasonably be compared.
"""
newdict = dict()
for k, v in instance.iteritems():
if isinstance(v, datetime.datetime):
# NOTE(danms): DB models and Instance objects have different
# timezone expectations
v = v.replace(tzinfo=None)
elif k == 'fault':
# NOTE(danms): DB models don't have 'fault'
continue
elif k == 'pci_devices':
# NOTE(yonlig.he) pci devices need lazy loading
# fake db does not support it yet.
continue
newdict[k] = v
return newdict
class FakeSchedulerAPI(object):
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties):
pass
def live_migration(self, ctxt, block_migration, disk_over_commit,
instance, dest):
pass
def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
filter_properties, reservations):
pass
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
network_manager='nova.network.manager.FlatManager')
fake.set_nodes([NODENAME])
self.flags(use_local=True, group='conductor')
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
self.compute = importutils.import_object(CONF.compute_manager)
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver, NODENAME)
self.compute._resource_tracker_dict[NODENAME] = fake_rt
def fake_get_compute_nodes_in_db(context):
fake_compute_nodes = [{'local_gb': 259,
'vcpus_used': 0,
'deleted': 0,
'hypervisor_type': 'powervm',
'created_at': '2013-04-01T00:27:06.000000',
'local_gb_used': 0,
'updated_at': '2013-04-03T00:35:41.000000',
'hypervisor_hostname': 'fake_phyp1',
'memory_mb_used': 512,
'memory_mb': 131072,
'current_workload': 0,
'vcpus': 16,
'cpu_info': 'ppc64,powervm,3940',
'running_vms': 0,
'free_disk_gb': 259,
'service_id': 7,
'hypervisor_version': 7,
'disk_available_least': 265856,
'deleted_at': None,
'free_ram_mb': 130560,
'metrics': '',
'stats': '',
'id': 2}]
return [compute_node_obj.ComputeNode._from_db_object(
context, compute_node_obj.ComputeNode(), cn)
for cn in fake_compute_nodes]
def fake_compute_node_delete(context, compute_node_id):
self.assertEqual(2, compute_node_id)
self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
fake_get_compute_nodes_in_db)
self.stubs.Set(db, 'compute_node_delete',
fake_compute_node_delete)
self.compute.update_available_resource(
context.get_admin_context())
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
self.none_quotas = quotas_obj.Quotas.from_reservations(
self.context, None)
def fake_show(meh, context, id):
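            # Minimal image service 'show' stub: return an active fake image
            # with kernel/ramdisk properties, or raise ImageNotFound when no
            # id is given.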
if id:
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
else:
raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
fake_rpcapi = FakeSchedulerAPI()
self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
fake_network.set_stub_network_methods(self.stubs)
fake_instance_actions.stub_out_action_events(self.stubs)
def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
self.stubs.Set(network_api.API, 'allocate_for_instance',
fake_get_nw_info)
self.compute_api = compute.API()
# Just to make long lines short
self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
timeutils.clear_time_override()
ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
instances = db.instance_get_all(ctxt)
for instance in instances:
db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny',
services=False):
"""Create a test instance."""
if not params:
params = {}
def make_fake_sys_meta():
sys_meta = params.pop("system_metadata", {})
inst_type = flavors.get_flavor_by_name(type_name)
for key in flavors.system_metadata_flavor_props:
sys_meta['instance_type_%s' % key] = inst_type[key]
return sys_meta
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['task_state'] = None
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
inst['node'] = NODENAME
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['system_metadata'] = make_fake_sys_meta()
inst['locked'] = False
inst['created_at'] = timeutils.utcnow()
inst['updated_at'] = timeutils.utcnow()
inst['launched_at'] = timeutils.utcnow()
inst['security_groups'] = []
inst.update(params)
if services:
_create_service_entries(self.context.elevated(),
{'fake_zone': [inst['host']]})
return db.instance_create(self.context, inst)
def _objectify(self, db_inst):
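        # Convert a DB instance dict into an Instance object with the
        # default set of fields loaded.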
return instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), db_inst,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
def _create_fake_instance_obj(self, params=None, type_name='m1.tiny'):
db_inst = self._create_fake_instance(params, type_name=type_name)
return self._objectify(db_inst)
def _create_instance_type(self, params=None):
"""Create a test instance type."""
if not params:
params = {}
context = self.context.elevated()
inst = {}
inst['name'] = 'm1.small'
inst['memory_mb'] = 1024
inst['vcpus'] = 1
inst['root_gb'] = 20
inst['ephemeral_gb'] = 10
inst['flavorid'] = '1'
inst['swap'] = 2048
inst['rxtx_factor'] = 1
inst.update(params)
return db.flavor_create(context, inst)['id']
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
'user_id': self.user_id,
'project_id': self.project_id}
return db.security_group_create(self.context, values)
def _stub_migrate_server(self):
def _fake_migrate_server(*args, **kwargs):
pass
self.stubs.Set(conductor_manager.ComputeTaskManager,
'migrate_server', _fake_migrate_server)
def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
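        # Create the aggregate if one was not passed in, then add the host
        # to it and return the updated aggregate.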
if not aggr:
aggr = self.api.create_aggregate(self.context, aggr_name, zone)
aggr = self.api.add_host_to_aggregate(self.context, aggr['id'], host)
return aggr
class ComputeVolumeTestCase(BaseTestCase):
def setUp(self):
super(ComputeVolumeTestCase, self).setUp()
self.volume_id = 'fake'
self.fetched_attempts = 0
self.instance = {
'id': 'fake',
'uuid': 'fake',
'name': 'fake',
'root_device_name': '/dev/vda',
}
self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
self.instance_object = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(),
fake_instance.fake_db_instance())
self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
{'id': self.volume_id})
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
lambda *a, **kw: {})
self.stubs.Set(self.compute.volume_api, 'terminate_connection',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'attach',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'detach',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'check_attach',
lambda *a, **kw: None)
def store_cinfo(context, *args, **kwargs):
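            # Capture the connection_info written through the BDM update
            # calls so tests can assert on it (e.g. the volume serial), and
            # return the fake volume BDM.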
self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
return self.fake_volume
self.stubs.Set(self.compute.conductor_api,
'block_device_mapping_update',
store_cinfo)
self.stubs.Set(self.compute.conductor_api,
'block_device_mapping_update_or_create',
store_cinfo)
self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
def test_attach_volume_serial(self):
fake_bdm = block_device_obj.BlockDeviceMapping(**self.fake_volume)
with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
return_value={})
) as mock_get_vol_enc:
instance = self._create_fake_instance()
self.compute.attach_volume(self.context, self.volume_id,
'/dev/vdb', instance, bdm=fake_bdm)
self.assertEqual(self.cinfo.get('serial'), self.volume_id)
def test_attach_volume_raises(self):
fake_bdm = block_device_obj.BlockDeviceMapping(**self.fake_volume)
instance = self._create_fake_instance()
def fake_attach(*args, **kwargs):
raise test.TestingException
with contextlib.nested(
mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
'attach'),
mock.patch.object(cinder.API, 'unreserve_volume'),
mock.patch.object(block_device_obj.BlockDeviceMapping,
'destroy')
) as (mock_attach, mock_unreserve, mock_destroy):
mock_attach.side_effect = fake_attach
self.assertRaises(
test.TestingException, self.compute.attach_volume,
self.context, 'fake', '/dev/vdb',
instance, bdm=fake_bdm)
self.assertTrue(mock_unreserve.called)
self.assertTrue(mock_destroy.called)
def test_attach_volume_no_bdm(self):
fake_bdm = block_device_obj.BlockDeviceMapping(**self.fake_volume)
instance = self._create_fake_instance()
with contextlib.nested(
mock.patch.object(block_device_obj.BlockDeviceMapping,
'get_by_volume_id', return_value=fake_bdm),
mock.patch.object(self.compute, '_attach_volume')
) as (mock_get_by_id, mock_attach):
self.compute.attach_volume(self.context, 'fake', '/dev/vdb',
instance, bdm=None)
mock_get_by_id.assert_called_once_with(self.context, 'fake')
self.assertTrue(mock_attach.called)
    def test_await_block_device_created_too_slow(self):
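        # The volume never leaves the 'creating' state, so VolumeNotCreated
        # should be raised once max_tries polls are exhausted.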
def never_get(context, vol_id):
return {
'status': 'creating',
'id': 'blah',
}
self.stubs.Set(self.compute.volume_api, 'get', never_get)
self.assertRaises(exception.VolumeNotCreated,
self.compute._await_block_device_map_created,
self.context, '1', max_tries=2, wait_between=0.1)
def test_await_block_device_created_slow(self):
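        # The volume only becomes 'available' on the third poll, so three
        # attempts should be reported.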
c = self.compute
def slow_get(context, vol_id):
while self.fetched_attempts < 2:
self.fetched_attempts += 1
return {
'status': 'creating',
'id': 'blah',
}
return {
'status': 'available',
'id': 'blah',
}
self.stubs.Set(c.volume_api, 'get', slow_get)
attempts = c._await_block_device_map_created(self.context, '1',
max_tries=4,
wait_between=0.1)
self.assertEqual(attempts, 3)
def test_boot_volume_serial(self):
with (
mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
) as mock_save:
block_device_mapping = [
block_device.BlockDeviceDict({
'id': 1,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': self.volume_id,
'device_name': '/dev/vdb',
'delete_on_termination': False,
})]
prepped_bdm = self.compute._prep_block_device(
self.context, self.instance, block_device_mapping)
mock_save.assert_called_once_with(self.context)
volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
self.assertEqual(volume_driver_bdm['connection_info']['serial'],
self.volume_id)
def test_boot_volume_metadata(self, metadata=True):
def volume_api_get(*args, **kwargs):
if metadata:
return {
'volume_image_metadata': {'vol_test_key': 'vol_test_value'}
}
else:
return {}
self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': self.volume_id,
'delete_on_termination': False,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping)
if metadata:
self.assertEqual(image_meta['vol_test_key'], 'vol_test_value')
else:
self.assertEqual(image_meta, {})
# Test it with new-style BDMs
block_device_mapping = [{
'boot_index': 0,
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': self.volume_id,
'delete_on_termination': False,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping, legacy_bdm=False)
if metadata:
self.assertEqual(image_meta['vol_test_key'], 'vol_test_value')
else:
self.assertEqual(image_meta, {})
def test_boot_volume_no_metadata(self):
self.test_boot_volume_metadata(metadata=False)
def test_boot_image_metadata(self, metadata=True):
def image_api_show(*args, **kwargs):
if metadata:
return {
'properties': {'img_test_key': 'img_test_value'}
}
else:
return {}
self.stubs.Set(self.compute_api.image_service, 'show', image_api_show)
block_device_mapping = [{
'boot_index': 0,
'source_type': 'image',
'destination_type': 'local',
'image_id': "fake-image",
'delete_on_termination': True,
}]
image_meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping, legacy_bdm=False)
if metadata:
self.assertEqual(image_meta['img_test_key'], 'img_test_value')
else:
self.assertEqual(image_meta, {})
def test_boot_image_no_metadata(self):
self.test_boot_image_metadata(metadata=False)
def test_poll_bandwidth_usage_disabled(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
# None of the mocks should be called.
self.mox.ReplayAll()
CONF.bandwidth_poll_interval = 0
self.compute._poll_bandwidth_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_bandwidth_usage_not_implemented(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(time, 'time')
self.mox.StubOutWithMock(instance_obj.InstanceList,
'get_by_host')
# Following methods will be called
utils.last_completed_audit_period().AndReturn((0, 0))
time.time().AndReturn(10)
        # Note - time.time() is called two more times from the LOG calls
time.time().AndReturn(20)
time.time().AndReturn(21)
instance_obj.InstanceList.get_by_host(ctxt,
'fake-mini',
use_slave=True).AndReturn([])
self.compute.driver.get_all_bw_counters([]).AndRaise(
NotImplementedError)
self.mox.ReplayAll()
CONF.bandwidth_poll_interval = 1
self.compute._poll_bandwidth_usage(ctxt)
# A second call won't call the stubs again as the bandwidth
# poll is now disabled
self.compute._poll_bandwidth_usage(ctxt)
self.mox.UnsetStubs()
@mock.patch.object(instance_obj.InstanceList, 'get_by_host')
@mock.patch.object(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
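        # Only volume-backed BDMs for instances on this host should be
        # returned; non-volume BDMs are filtered out.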
fake_instance = mock.Mock(uuid='fake-instance-uuid')
mock_get_by_host.return_value = [fake_instance]
volume_bdm = mock.Mock(id=1, is_volume=True)
not_volume_bdm = mock.Mock(id=2, is_volume=False)
mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]
expected_host_bdms = [{'instance': fake_instance,
'instance_bdms': [volume_bdm]}]
got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
mock_get_by_host.assert_called_once_with('fake-context',
self.compute.host)
mock_get_by_inst.assert_called_once_with('fake-context',
'fake-instance-uuid')
self.assertEqual(expected_host_bdms, got_host_bdms)
def test_poll_volume_usage_disabled(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
# None of the mocks should be called.
self.mox.ReplayAll()
CONF.volume_usage_poll_interval = 0
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_volume_usage_returns_no_vols(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
# Following methods are called.
utils.last_completed_audit_period().AndReturn((0, 0))
self.compute._get_host_volume_bdms(ctxt).AndReturn([])
self.mox.ReplayAll()
CONF.volume_usage_poll_interval = 10
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_poll_volume_usage_with_data(self):
ctxt = 'MockContext'
self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
lambda x, y: [3, 4])
# All the mocks are called
utils.last_completed_audit_period().AndReturn((10, 20))
self.compute._get_host_volume_bdms(ctxt).AndReturn([1, 2])
self.compute._update_volume_usage_cache(ctxt, [3, 4])
self.mox.ReplayAll()
CONF.volume_usage_poll_interval = 10
self.compute._poll_volume_usage(ctxt)
self.mox.UnsetStubs()
def test_detach_volume_usage(self):
        # Test that detaching a volume updates the volume usage cache table
        # correctly.
instance = self._create_fake_instance()
bdm = fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'device_name': '/dev/vdb',
'connection_info': '{}', 'instance_uuid': instance['uuid'],
'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 1})
host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
'connection_info': '{}', 'instance_uuid': instance['uuid'],
'volume_id': 1}
self.mox.StubOutWithMock(db, 'block_device_mapping_get_by_volume_id')
self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
# The following methods will be called
db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
AndReturn(bdm)
self.compute.driver.block_stats(instance['name'], 'vdb').\
AndReturn([1L, 30L, 1L, 20L, None])
self.compute._get_host_volume_bdms(self.context).AndReturn(
host_volume_bdms)
self.compute.driver.get_all_volume_usage(
self.context, host_volume_bdms).AndReturn(
[{'volume': 1,
'rd_req': 1,
'rd_bytes': 10,
'wr_req': 1,
'wr_bytes': 5,
'instance': instance}])
db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
AndReturn(bdm)
self.mox.ReplayAll()
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
fake_get_volume_encryption_metadata)
self.compute.attach_volume(self.context, 1, '/dev/vdb', instance)
# Poll volume usage & then detach the volume. This will update the
# total fields in the volume usage cache.
CONF.volume_usage_poll_interval = 10
self.compute._poll_volume_usage(self.context)
# Check that a volume.usage and volume.attach notification was sent
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
self.compute.detach_volume(self.context, 1, instance)
# Check that volume.attach, 2 volume.usage, and volume.detach
# notifications were sent
self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('compute.instance.volume.attach', msg.event_type)
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEqual('volume.usage', msg.event_type)
payload = msg.payload
self.assertEqual(instance['uuid'], payload['instance_id'])
self.assertEqual('fake', payload['user_id'])
self.assertEqual('fake', payload['tenant_id'])
self.assertEqual(1, payload['reads'])
self.assertEqual(30, payload['read_bytes'])
self.assertEqual(1, payload['writes'])
self.assertEqual(20, payload['write_bytes'])
self.assertIsNone(payload['availability_zone'])
msg = fake_notifier.NOTIFICATIONS[3]
self.assertEqual('compute.instance.volume.detach', msg.event_type)
        # Check the database for the volume usage totals.
volume_usages = db.vol_get_usage_by_time(self.context, 0)
self.assertEqual(1, len(volume_usages))
volume_usage = volume_usages[0]
self.assertEqual(0, volume_usage['curr_reads'])
self.assertEqual(0, volume_usage['curr_read_bytes'])
self.assertEqual(0, volume_usage['curr_writes'])
self.assertEqual(0, volume_usage['curr_write_bytes'])
self.assertEqual(1, volume_usage['tot_reads'])
self.assertEqual(30, volume_usage['tot_read_bytes'])
self.assertEqual(1, volume_usage['tot_writes'])
self.assertEqual(20, volume_usage['tot_write_bytes'])
def test_prepare_image_mapping(self):
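        # Swap and ephemeral image mappings should be converted into blank,
        # local BDMs with the expected guest_format and volume sizes.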
swap_size = 1
ephemeral_size = 1
instance_type = {'swap': swap_size,
'ephemeral_gb': ephemeral_size}
instance = self._create_fake_instance()
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
]
        prepped_bdm = self.compute_api._prepare_image_mapping(
            instance_type, instance['uuid'], mappings)
expected_result = [
{
'device_name': '/dev/sdb4',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': swap_size
},
{
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': CONF.default_ephemeral_format,
'boot_index': -1,
'volume_size': ephemeral_size
},
{
'device_name': '/dev/sdc2',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': CONF.default_ephemeral_format,
'boot_index': -1,
'volume_size': ephemeral_size
}
]
        for expected, got in zip(expected_result, prepped_bdm):
self.assertThat(expected, matchers.IsSubDictOf(got))
def test_validate_bdm(self):
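        # Start from a valid set of mappings, then mutate them to trigger
        # each validation error: bad boot sequence, too many local devices,
        # oversized ephemerals and swap, multiple swap devices, and an
        # image BDM without a size.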
def fake_get(self, context, res_id):
return {'id': res_id}
def fake_check_attach(*args, **kwargs):
pass
self.stubs.Set(cinder.API, 'get', fake_get)
self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
self.stubs.Set(cinder.API, 'check_attach',
fake_check_attach)
volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
image_id = '77777777-aaaa-bbbb-cccc-555555555555'
instance = self._create_fake_instance()
instance_type = {'swap': 1, 'ephemeral_gb': 2}
mappings = [
{
'device_name': '/dev/sdb4',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': 1
},
{
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'device_type': 'disk',
'volume_id': volume_id,
'guest_format': None,
'boot_index': 1,
'volume_size': 6
},
{
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': snapshot_id,
'device_type': 'disk',
'guest_format': None,
'boot_index': 0,
'volume_size': 4
},
{
'device_name': '/dev/sda3',
'source_type': 'image',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': None,
'boot_index': 2,
'volume_size': 1
}
]
# Make sure it passes at first
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings)
# Boot sequence
mappings[2]['boot_index'] = 2
self.assertRaises(exception.InvalidBDMBootSequence,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
mappings[2]['boot_index'] = 0
# number of local block_devices
self.flags(max_local_block_devices=1)
self.assertRaises(exception.InvalidBDMLocalsLimit,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
ephemerals = [
{
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'volume_id': volume_id,
'guest_format': None,
'boot_index': -1,
'volume_size': 1
},
{
'device_name': '/dev/vdc',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'volume_id': volume_id,
'guest_format': None,
'boot_index': -1,
'volume_size': 1
}]
self.flags(max_local_block_devices=4)
# More ephemerals are OK as long as they are not over the size limit
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings + ephemerals)
# Ephemerals over the size limit
ephemerals[0]['volume_size'] = 3
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings + ephemerals)
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings + [ephemerals[0]])
# Swap over the size limit
mappings[0]['volume_size'] = 3
self.assertRaises(exception.InvalidBDMSwapSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings)
mappings[0]['volume_size'] = 1
additional_swap = [
{
'device_name': '/dev/vdb',
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': 'swap',
'boot_index': -1,
'volume_size': 1
}]
# More than one swap
self.assertRaises(exception.InvalidBDMFormat,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings + additional_swap)
image_no_size = [
{
'device_name': '/dev/sda4',
'source_type': 'image',
'image_id': image_id,
'destination_type': 'volume',
'boot_index': -1,
'volume_size': None,
}]
self.assertRaises(exception.InvalidBDM,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
mappings + image_no_size)
def test_validate_bdm_media_service_exceptions(self):
instance_type = {'swap': 1, 'ephemeral_gb': 1}
all_mappings = [{'id': 1,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': self.volume_id,
'device_name': 'vda',
'boot_index': 0,
'delete_on_termination': False}]
# Check that the volume status is 'available' and reject if not
def fake_volume_get_1(self, context, volume_id):
return {'id': volume_id,
'status': 'creating',
'attach_status': 'detached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_1)
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Check that the volume attach_status is 'detached' and reject if not
def fake_volume_get_2(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
'attach_status': 'attached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_2)
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings)
# Check that the volume status is 'available' and attach_status is
# 'detached' and accept the request if so
def fake_volume_get_3(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
'attach_status': 'detached'}
self.stubs.Set(cinder.API, 'get', fake_volume_get_3)
self.compute_api._validate_bdm(self.context, self.instance,
instance_type, all_mappings)
def test_volume_snapshot_create(self):
self.assertRaises(messaging.ExpectedException,
self.compute.volume_snapshot_create, self.context,
self.instance_object, 'fake_id', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_create, self.context,
self.instance_object, 'fake_id', {})
def test_volume_snapshot_delete(self):
self.assertRaises(messaging.ExpectedException,
self.compute.volume_snapshot_delete, self.context,
self.instance_object, 'fake_id', 'fake_id2', {})
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.volume_snapshot_delete, self.context,
self.instance_object, 'fake_id', 'fake_id2', {})
class ComputeTestCase(BaseTestCase):
def test_wrap_instance_fault(self):
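        # wrap_instance_fault should record an instance fault when the
        # decorated method raises and then re-raise the original exception.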
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, instance=inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_instance_in_args(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise NotImplementedError()
self.assertRaises(NotImplementedError, failer,
self.compute, self.context, inst)
self.assertTrue(called['fault_added'])
def test_wrap_instance_fault_no_instance(self):
inst = {"uuid": "fake_uuid"}
called = {'fault_added': False}
def did_it_add_fault(*args):
called['fault_added'] = True
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
def failer(self2, context, instance):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.assertRaises(exception.InstanceNotFound, failer,
self.compute, self.context, inst)
self.assertFalse(called['fault_added'])
def test_wrap_instance_event(self):
inst = {"uuid": "fake_uuid"}
called = {'started': False,
'finished': False}
def did_it_update_start(self2, context, values):
called['started'] = True
def did_it_update_finish(self2, context, values):
called['finished'] = True
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_start', did_it_update_start)
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_finish', did_it_update_finish)
@compute_manager.wrap_instance_event
def fake_event(self, context, instance):
pass
fake_event(self.compute, self.context, instance=inst)
self.assertTrue(called['started'])
self.assertTrue(called['finished'])
def test_wrap_instance_event_log_exception(self):
inst = {"uuid": "fake_uuid"}
called = {'started': False,
'finished': False,
'message': ''}
def did_it_update_start(self2, context, values):
called['started'] = True
def did_it_update_finish(self2, context, values):
called['finished'] = True
called['message'] = values['message']
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_start', did_it_update_start)
self.stubs.Set(conductor_manager.ConductorManager,
'action_event_finish', did_it_update_finish)
@compute_manager.wrap_instance_event
def fake_event(self2, context, instance):
raise exception.NovaException()
self.assertRaises(exception.NovaException, fake_event,
self.compute, self.context, instance=inst)
self.assertTrue(called['started'])
self.assertTrue(called['finished'])
self.assertEqual('An unknown exception occurred.', called['message'])
def test_object_compat(self):
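        # object_compat should convert a DB instance dict passed as a
        # keyword argument into an Instance object.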
db_inst = fake_instance.fake_db_instance()
@compute_manager.object_compat
def test_fn(_self, context, instance):
self.assertIsInstance(instance, instance_obj.Instance)
self.assertEqual(instance.uuid, db_inst['uuid'])
test_fn(None, self.context, instance=db_inst)
def test_object_compat_more_positional_args(self):
db_inst = fake_instance.fake_db_instance()
@compute_manager.object_compat
def test_fn(_self, context, instance, pos_arg_1, pos_arg_2):
self.assertIsInstance(instance, instance_obj.Instance)
self.assertEqual(instance.uuid, db_inst['uuid'])
self.assertEqual(pos_arg_1, 'fake_pos_arg1')
self.assertEqual(pos_arg_2, 'fake_pos_arg2')
test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2')
def test_create_instance_with_img_ref_associates_config_drive(self):
# Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
try:
self.compute.run_instance(self.context, instance, {}, {},
[], None, None, True, None, False)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_associates_config_drive(self):
# Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
try:
self.compute.run_instance(self.context, instance, {}, {},
[], None, None, True, None, False)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertTrue(instance['config_drive'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_unlimited_memory(self):
# Default of memory limit=None is unlimited.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
filter_properties = {'limits': {'memory_mb': None}}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
def test_create_instance_unlimited_disk(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"root_gb": 999999999999,
"ephemeral_gb": 99999999999}
filter_properties = {'limits': {'disk_gb': None}}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
def test_create_multiple_instances_then_starve(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
self.assertEqual(256, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
self.assertEqual(768, self.rt.compute_node['local_gb_used'])
params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
instance = self._create_fake_instance_obj(params)
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance,
{}, filter_properties, [], None, None, True, None, False)
def test_create_multiple_instance_with_neutron_port(self):
instance_type = flavors.get_default_flavor()
def fake_is_neutron():
return True
self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create,
self.context,
instance_type=instance_type,
image_href=None,
max_count=2,
requested_networks=[(None, None, 'adadds')])
def test_create_instance_with_oversubscribed_ram(self):
# Test passing of oversubscribed ram policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.45)
# build an instance, specifying an amount of memory that exceeds
# total_mem_mb, but is less than the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance_obj(params)
limits = {'memory_mb': oversub_limit_mb}
filter_properties = {'limits': limits}
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
def test_create_instance_with_oversubscribed_ram_fail(self):
"""Test passing of oversubscribed ram policy from the scheduler, but
with insufficient memory.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
instance_mb = int(total_mem_mb * 1.55)
        # build an instance, specifying an amount of memory that exceeds
        # both total_mem_mb and the oversubscribed limit:
params = {"memory_mb": instance_mb, "root_gb": 128,
"ephemeral_gb": 128}
instance = self._create_fake_instance(params)
filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance, {},
filter_properties, [], None, None, True, None, False)
def test_create_instance_with_oversubscribed_cpu(self):
# Test passing of oversubscribed cpu policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
self.assertEqual(1, resources['vcpus'])
        # build an instance, specifying a number of vcpus that exceeds the
        # host's total vcpus but is within the oversubscribed limit:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# create one more instance:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 1}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
self.assertEqual(3, self.rt.compute_node['vcpus_used'])
# delete the instance:
instance['vm_state'] = vm_states.DELETED
self.rt.update_usage(self.context,
instance=instance)
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
# now oversubscribe vcpus and fail:
params = {"memory_mb": 10, "root_gb": 1,
"ephemeral_gb": 1, "vcpus": 2}
instance = self._create_fake_instance_obj(params)
limits = {'vcpu': 3}
filter_properties = {'limits': limits}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance, {},
filter_properties, [], None, None, True, None, False)
def test_create_instance_with_oversubscribed_disk(self):
# Test passing of oversubscribed disk policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.45)
# build an instance, specifying an amount of disk that exceeds
# total_disk_gb, but is less than the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance_obj(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.compute.run_instance(self.context, instance, {},
filter_properties, [], None, None, True, None, False)
self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
def test_create_instance_with_oversubscribed_disk_fail(self):
"""Test passing of oversubscribed disk policy from the scheduler, but
with insufficient disk.
"""
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
instance_gb = int(total_disk_gb * 1.55)
        # build an instance, specifying an amount of disk that exceeds
        # both total_disk_gb and the oversubscribed limit:
params = {"root_gb": instance_gb, "memory_mb": 10}
instance = self._create_fake_instance(params)
limits = {'disk_gb': oversub_limit_gb}
filter_properties = {'limits': limits}
self.assertRaises(exception.ComputeResourcesUnavailable,
self.compute.run_instance, self.context, instance, {},
filter_properties, [], None, None, True, None, False)
def test_create_instance_without_node_param(self):
instance = self._create_fake_instance_obj({'node': None})
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(NODENAME, instance['node'])
def test_create_instance_no_image(self):
# Create instance with no image provided.
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
def test_default_access_ip(self):
self.flags(default_access_ip_network_name='test1')
fake_network.unset_stub_network_methods(self.stubs)
instance = jsonutils.to_primitive(self._create_fake_instance())
orig_update = self.compute._instance_update
# Make sure the access_ip_* updates happen in the same DB
# update as the set to ACTIVE.
def _instance_update(ctxt, instance_uuid, **kwargs):
if kwargs.get('vm_state', None) == vm_states.ACTIVE:
self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
return orig_update(ctxt, instance_uuid, **kwargs)
self.stubs.Set(self.compute, '_instance_update', _instance_update)
try:
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
self.assertEqual(instance['access_ip_v6'], '2001:db8:0:1::1')
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_no_default_access_ip(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
try:
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instances = db.instance_get_all(self.context)
instance = instances[0]
self.assertFalse(instance['access_ip_v4'])
self.assertFalse(instance['access_ip_v6'])
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
# check the persistence of the ERROR(scheduling) state.
params = {'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING}
self._create_fake_instance(params=params)
        # check state is failed even after the periodic poll
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
def test_run_instance_setup_block_device_mapping_fail(self):
"""block device mapping failure test.
Make sure that when there is a block device mapping problem,
the instance goes to ERROR state, keeping the task state
"""
def fake(*args, **kwargs):
raise exception.InvalidBDM()
self.stubs.Set(nova.compute.manager.ComputeManager,
'_prep_block_device', fake)
instance = self._create_fake_instance()
self.assertRaises(exception.InvalidBDM, self.compute.run_instance,
self.context, instance=instance, request_spec={},
filter_properties={}, requested_networks=[],
injected_files=None, admin_password=None,
is_first_time=True, node=None,
legacy_bdm_in_spec=False)
        # check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
def test_run_instance_spawn_fail(self):
"""spawn failure test.
Make sure that when there is a spawning problem,
the instance goes to ERROR state, keeping the task state.
"""
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'spawn', fake)
instance = self._create_fake_instance_obj()
self.assertRaises(test.TestingException, self.compute.run_instance,
self.context, instance=instance, request_spec={},
filter_properties={}, requested_networks=[],
injected_files=None, admin_password=None,
is_first_time=True, node=None,
legacy_bdm_in_spec=False)
        # check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
def test_run_instance_dealloc_network_instance_not_found(self):
"""spawn network deallocate test.
Make sure that when an instance is not found during spawn
that the network is deallocated
"""
instance = self._create_fake_instance_obj()
def fake(*args, **kwargs):
raise exception.InstanceNotFound(instance_id="fake")
self.stubs.Set(self.compute.driver, 'spawn', fake)
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
def test_run_instance_bails_on_missing_instance(self):
# Make sure that run_instance() will quickly ignore a deleted instance
called = {}
instance = self._create_fake_instance()
def fake_instance_update(self, *a, **args):
called['instance_update'] = True
raise exception.InstanceNotFound(instance_id='foo')
self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertIn('instance_update', called)
def test_run_instance_bails_on_deleting_instance(self):
# Make sure that run_instance() will quickly ignore a deleting instance
called = {}
instance = self._create_fake_instance()
def fake_instance_update(self, *a, **args):
called['instance_update'] = True
raise exception.UnexpectedDeletingTaskStateError(
expected='scheduling', actual='deleting')
self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertIn('instance_update', called)
def test_run_instance_bails_on_missing_instance_2(self):
# Make sure that run_instance() will quickly ignore a deleted instance
called = {}
instance = self._create_fake_instance()
def fake_default_block_device_names(self, *a, **args):
called['default_block_device_names'] = True
raise exception.InstanceNotFound(instance_id='foo')
self.stubs.Set(self.compute, '_default_block_device_names',
fake_default_block_device_names)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertIn('default_block_device_names', called)
def test_can_terminate_on_error_state(self):
# Make sure that the instance can be terminated in ERROR state.
        # check failed to schedule --> terminate
params = {'vm_state': vm_states.ERROR}
instance = self._create_fake_instance_obj(params=params)
self.compute.terminate_instance(self.context, instance, [], [])
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context, instance['uuid'])
# Double check it's not there for admins, either.
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
self.context.elevated(), instance['uuid'])
def test_run_terminate(self):
# Make sure it is possible to run and terminate instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
instances = db.instance_get_all(self.context)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
admin_deleted_context = context.get_admin_context(
read_deleted="only")
instance = db.instance_get_by_uuid(admin_deleted_context,
instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.DELETED)
self.assertIsNone(instance['task_state'])
def test_run_terminate_with_vol_attached(self):
"""Make sure it is possible to run and terminate instance with volume
attached
"""
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
def fake_check_attach(*args, **kwargs):
pass
def fake_reserve_volume(*args, **kwargs):
pass
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
def fake_terminate_connection(self, context, volume_id, connector):
pass
def fake_detach(self, context, volume_id):
pass
bdms = []
def fake_rpc_reserve_block_device_name(self, context, **kwargs):
bdm = block_device_obj.BlockDeviceMapping(
**{'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 1,
'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc'})
bdm.create(context)
bdms.append(bdm)
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(cinder.API, 'terminate_connection',
fake_terminate_connection)
self.stubs.Set(cinder.API, 'detach', fake_detach)
self.stubs.Set(compute_rpcapi.ComputeAPI,
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
self.compute_api.attach_volume(self.context, instance, 1,
'/dev/vdc')
self.compute.terminate_instance(self.context,
self._objectify(instance), bdms, [])
instances = db.instance_get_all(self.context)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
bdms = db.block_device_mapping_get_all_by_instance(self.context,
instance['uuid'])
self.assertEqual(len(bdms), 0)
def test_run_terminate_no_image(self):
"""Make sure instance started without image (from volume)
can be termintad without issues
"""
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self._assert_state({'vm_state': vm_states.ACTIVE,
'task_state': None})
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 0)
def test_terminate_no_network(self):
# This is as reported in LP bug 1008875
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.mox.ReplayAll()
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
instances = db.instance_get_all(self.context)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
# Make sure timestamps are set for launched and destroyed.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance['launched_at'] = None
self.assertIsNone(instance['launched_at'])
self.assertIsNone(instance['deleted_at'])
launch = timeutils.utcnow()
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertTrue(instance['launched_at'] > launch)
self.assertIsNone(instance['deleted_at'])
terminate = timeutils.utcnow()
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
with utils.temporary_mutation(self.context, read_deleted='only'):
instance = db.instance_get_by_uuid(self.context,
instance['uuid'])
self.assertTrue(instance['launched_at'] < terminate)
self.assertTrue(instance['deleted_at'] > terminate)
def test_run_terminate_deallocate_net_failure_sets_error_state(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
def _fake_deallocate_network(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute, '_deallocate_network',
_fake_deallocate_network)
try:
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
except test.TestingException:
pass
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_stop(self):
# Ensure instance can be stopped.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
inst_uuid = instance['uuid']
extra = ['system_metadata', 'metadata']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_start(self):
# Ensure instance can be started.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save(self.context)
self.compute.start_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_stop_start_no_image(self):
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.POWERING_OFF})
extra = ['system_metadata', 'metadata']
inst_uuid = instance['uuid']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
inst_uuid,
expected_attrs=extra)
self.compute.stop_instance(self.context, instance=inst_obj)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save(self.context)
self.compute.start_instance(self.context, instance=inst_obj)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rescue(self):
# Ensure instance can be rescued and unrescued.
called = {'rescued': False,
'unrescued': False}
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
called['rescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
def fake_unrescue(self, instance_ref, network_info):
called['unrescued'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESCUING})
self.compute.rescue_instance(self.context, self._objectify(instance),
None)
self.assertTrue(called['rescued'])
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.UNRESCUING})
self.compute.unrescue_instance(self.context,
instance=self._objectify(instance))
self.assertTrue(called['unrescued'])
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rescue_notifications(self):
# Ensure notifications on instance rescue.
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
pass
self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
fake_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESCUING})
self.compute.rescue_instance(self.context, self._objectify(instance),
None)
expected_notifications = ['compute.instance.rescue.start',
'compute.instance.exists',
'compute.instance.rescue.end']
self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
expected_notifications)
for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
self.assertEqual(msg.event_type, expected_notifications[n])
self.assertEqual(msg.priority, 'INFO')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance_uuid)
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertIn('rescue_image_name', msg.payload)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_unrescue_notifications(self):
        # Ensure notifications on instance unrescue.
def fake_unrescue(self, instance_ref, network_info):
pass
self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
fake_unrescue)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
fake_notifier.NOTIFICATIONS = []
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.UNRESCUING})
self.compute.unrescue_instance(self.context,
instance=self._objectify(instance))
expected_notifications = ['compute.instance.unrescue.start',
'compute.instance.unrescue.end']
self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
expected_notifications)
for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
self.assertEqual(msg.event_type, expected_notifications[n])
self.assertEqual(msg.priority, 'INFO')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance_uuid)
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rescue_handle_err(self):
# If the driver fails to rescue, instance state should remain the same
# and the exception should be converted to InstanceNotRescuable
instance = jsonutils.to_primitive(self._create_fake_instance())
inst_obj = self._objectify(instance)
self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')
self.compute._get_rescue_image(
mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
nova.virt.fake.FakeDriver.rescue(
mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
).AndRaise(RuntimeError("Try again later"))
self.mox.ReplayAll()
expected_message = ('Instance %s cannot be rescued: '
'Driver Error: Try again later' % inst_obj['uuid'])
inst_obj['vm_state'] = 'some_random_state'
with testtools.ExpectedException(
exception.InstanceNotRescuable, expected_message):
self.compute.rescue_instance(
self.context, instance=inst_obj,
rescue_password='password')
self.assertEqual('some_random_state', inst_obj['vm_state'])
@mock.patch.object(nova.compute.utils, "get_image_metadata")
@mock.patch.object(nova.image.glance, "get_remote_image_service")
@mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
def test_rescue_with_image_specified(self, mock_rescue,
mock_get_remote_image_service, mock_get_image_metadata):
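        # When rescue_image_ref is supplied, its metadata should be looked up
        # and passed through to the driver's rescue call.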
image_ref = "image-ref"
rescue_image_meta = {}
params = {"task_state": task_states.RESCUING}
instance = self._create_fake_instance(params=params)
instance = self._objectify(instance)
ctxt = context.get_admin_context()
mock_context = mock.Mock()
mock_context.elevated.return_value = ctxt
mock_get_image_metadata.return_value = rescue_image_meta
mock_image_service = "image_service"
mock_get_remote_image_service.return_value = (mock_image_service, "id")
self.compute.rescue_instance(mock_context, instance=instance,
rescue_password="password", rescue_image_ref=image_ref)
mock_get_remote_image_service.assert_called_with(ctxt,
image_ref)
mock_get_image_metadata.assert_called_with(ctxt,
mock_image_service,
image_ref, instance)
mock_rescue.assert_called_with(ctxt, instance, [],
rescue_image_meta, 'password')
self.compute.terminate_instance(ctxt,
self._objectify(instance), [], [])
@mock.patch.object(nova.compute.utils, "get_image_metadata")
@mock.patch.object(nova.image.glance, "get_remote_image_service")
@mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
def test_rescue_with_base_image_when_image_not_specified(self,
mock_rescue, mock_get_remote_image_service,
mock_get_image_metadata):
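        # With no rescue image specified, the instance's base image from
        # system_metadata should be used instead.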
image_ref = "image-ref"
system_meta = {"image_base_image_ref": image_ref}
rescue_image_meta = {}
params = {"task_state": task_states.RESCUING,
"system_metadata": system_meta}
instance = self._create_fake_instance(params=params)
instance = self._objectify(instance)
ctxt = context.get_admin_context()
mock_context = mock.Mock()
mock_context.elevated.return_value = ctxt
mock_get_image_metadata.return_value = rescue_image_meta
mock_image_service = "image_service"
mock_get_remote_image_service.return_value = (mock_image_service, "id")
self.compute.rescue_instance(mock_context, instance=instance,
rescue_password="password")
mock_get_remote_image_service.assert_called_with(ctxt,
image_ref)
mock_get_image_metadata.assert_called_with(ctxt,
mock_image_service,
image_ref, instance)
mock_rescue.assert_called_with(ctxt, instance, [],
rescue_image_meta, 'password')
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_power_on(self):
# Ensure instance can be powered on.
called = {'power_on': False}
def fake_driver_power_on(self, context, instance, network_info,
block_device_info):
called['power_on'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
fake_driver_power_on)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
extra = ['system_metadata', 'metadata']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance['uuid'],
expected_attrs=extra)
inst_obj.task_state = task_states.POWERING_ON
inst_obj.save(self.context)
self.compute.start_instance(self.context, instance=inst_obj)
self.assertTrue(called['power_on'])
self.compute.terminate_instance(self.context,
self._objectify(inst_obj), [], [])
def test_power_off(self):
# Ensure instance can be powered off.
called = {'power_off': False}
def fake_driver_power_off(self, instance):
called['power_off'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
fake_driver_power_off)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
extra = ['system_metadata', 'metadata']
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance['uuid'],
expected_attrs=extra)
inst_obj.task_state = task_states.POWERING_OFF
inst_obj.save(self.context)
self.compute.stop_instance(self.context, instance=inst_obj)
self.assertTrue(called['power_off'])
self.compute.terminate_instance(self.context,
self._objectify(inst_obj), [], [])
def test_pause(self):
# Ensure instance can be paused and unpaused.
instance = self._create_fake_instance()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None, None, True,
None, False)
instance = self._objectify(instance)
instance.task_state = task_states.PAUSING
instance.save()
fake_notifier.NOTIFICATIONS = []
self.compute.pause_instance(self.context, instance=instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.pause.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.pause.end')
instance.task_state = task_states.UNPAUSING
instance.save()
fake_notifier.NOTIFICATIONS = []
self.compute.unpause_instance(self.context, instance=instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.unpause.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.unpause.end')
self.compute.terminate_instance(self.context, instance, [], [])
def test_suspend(self):
        # Ensure instance can be suspended and resumed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instance = self._objectify(instance)
instance.task_state = task_states.SUSPENDING
instance.save()
self.compute.suspend_instance(self.context, instance)
instance.task_state = task_states.RESUMING
instance.save()
self.compute.resume_instance(self.context, instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_suspend_error(self):
# Ensure vm_state is ERROR when suspend error occurs.
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'suspend', fake)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertRaises(test.TestingException,
self.compute.suspend_instance,
self.context,
instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_suspend_not_implemented(self):
        # Ensure the expected exception is raised and the instance's vm_state
        # is restored to its original value if suspend is not implemented by
        # the driver.
def fake(*args, **kwargs):
raise NotImplementedError('suspend test')
self.stubs.Set(self.compute.driver, 'suspend', fake)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_state = instance['vm_state']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertRaises(NotImplementedError,
self.compute.suspend_instance,
self.context,
instance=instance)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance_state, instance['vm_state'])
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rebuild(self):
# Ensure instance can be rebuilt.
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, self._objectify(instance),
image_ref, image_ref,
injected_files=[],
new_pass="new_password",
orig_sys_metadata=sys_metadata,
bdms=[], recreate=False,
on_shared_storage=False)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rebuild_driver(self):
# Make sure virt drivers can override default rebuild
called = {'rebuild': False}
def fake(**kwargs):
instance = kwargs['instance']
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
called['rebuild'] = True
self.stubs.Set(self.compute.driver, 'rebuild', fake)
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, self._objectify(instance),
image_ref, image_ref,
injected_files=[],
new_pass="new_password",
orig_sys_metadata=sys_metadata,
bdms=[], recreate=False,
on_shared_storage=False)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rebuild_no_image(self):
# Ensure instance can be rebuilt when started with no image.
params = {'image_ref': ''}
instance = self._create_fake_instance_obj(params)
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, self._objectify(instance),
'', '', injected_files=[],
new_pass="new_password",
orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rebuild_launched_at_time(self):
        # Ensure launched_at is updated when an instance is rebuilt.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
image_ref = instance['image_ref']
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
timeutils.set_time_override(cur_time)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, self._objectify(instance),
image_ref, image_ref,
injected_files=[],
new_pass="new_password",
orig_sys_metadata={},
bdms=[], recreate=False,
on_shared_storage=False)
        instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(cur_time, instance['launched_at'])
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_rebuild_with_injected_files(self):
# Ensure instance can be rebuilt with injected files.
injected_files = [
('/a/b/c', base64.b64encode('foobarbaz')),
]
self.decoded_files = [
('/a/b/c', 'foobarbaz'),
]
def _spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
self.assertEqual(self.decoded_files, injected_files)
self.stubs.Set(self.compute.driver, 'spawn', _spawn)
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
instance['uuid'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context, self._objectify(instance),
image_ref, image_ref,
injected_files=injected_files,
new_pass="new_password",
orig_sys_metadata=sys_metadata,
bdms=[], recreate=False,
on_shared_storage=False)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def _test_reboot(self, soft,
test_delete=False, test_unrescue=False,
fail_reboot=False, fail_running=False):
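        # Drive reboot_instance through mox-recorded DB and driver
        # expectations, covering soft/hard reboots and the optional
        # delete/unrescue/failure paths.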
reboot_type = soft and 'SOFT' or 'HARD'
task_pending = (soft and task_states.REBOOT_PENDING
or task_states.REBOOT_PENDING_HARD)
task_started = (soft and task_states.REBOOT_STARTED
or task_states.REBOOT_STARTED_HARD)
expected_task = (soft and task_states.REBOOTING
or task_states.REBOOTING_HARD)
expected_tasks = (soft and (task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED)
or (task_states.REBOOTING_HARD,
task_states.REBOOT_PENDING_HARD,
task_states.REBOOT_STARTED_HARD))
# This is a true unit test, so we don't need the network stubs.
fake_network.unset_stub_network_methods(self.stubs)
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.driver, 'reboot')
# FIXME(comstud): I don't feel like the context needs to
# be elevated at all. Hopefully remove elevated from
# reboot_instance and remove the stub here in a future patch.
# econtext would just become self.context below then.
econtext = self.context.elevated()
db_instance = fake_instance.fake_db_instance(
**dict(uuid='fake-instance',
power_state=power_state.NOSTATE,
vm_state=vm_states.ACTIVE,
task_state=expected_task,
launched_at=timeutils.utcnow()))
instance = instance_obj.Instance._from_db_object(
econtext, instance_obj.Instance(), db_instance)
updated_dbinstance1 = fake_instance.fake_db_instance(
**dict(uuid='updated-instance1',
power_state=10003,
vm_state=vm_states.ACTIVE,
task_state=expected_task,
launched_at=timeutils.utcnow()))
updated_dbinstance2 = fake_instance.fake_db_instance(
**dict(uuid='updated-instance2',
power_state=10003,
vm_state=vm_states.ACTIVE,
task_state=expected_task,
launched_at=timeutils.utcnow()))
if test_unrescue:
instance['vm_state'] = vm_states.RESCUED
instance.obj_reset_changes()
fake_nw_model = network_model.NetworkInfo()
fake_block_dev_info = 'fake_block_dev_info'
fake_power_state1 = 10001
fake_power_state2 = power_state.RUNNING
fake_power_state3 = 10002
# Beginning of calls we expect.
self.mox.StubOutWithMock(self.context, 'elevated')
self.context.elevated().AndReturn(econtext)
self.compute._get_instance_volume_block_device_info(
econtext, instance).AndReturn(fake_block_dev_info)
self.compute._get_instance_nw_info(econtext,
instance).AndReturn(
fake_nw_model)
self.compute._notify_about_instance_usage(econtext,
instance,
'reboot.start')
self.compute._get_power_state(econtext,
instance).AndReturn(fake_power_state1)
db.instance_update_and_get_original(econtext, instance['uuid'],
{'task_state': task_pending,
'expected_task_state': expected_tasks,
'power_state': fake_power_state1},
update_cells=False,
columns_to_join=['system_metadata']
).AndReturn((None,
updated_dbinstance1))
expected_nw_info = fake_nw_model
db.instance_update_and_get_original(econtext,
updated_dbinstance1['uuid'],
{'task_state': task_started,
'expected_task_state': task_pending},
update_cells=False,
columns_to_join=['system_metadata']
).AndReturn((None,
updated_dbinstance1))
# Annoying. driver.reboot is wrapped in a try/except, and
# doesn't re-raise. It eats exception generated by mox if
# this is called with the wrong args, so we have to hack
# around it.
reboot_call_info = {}
expected_call_info = {
'args': (econtext, instance, expected_nw_info,
reboot_type),
'kwargs': {'block_device_info': fake_block_dev_info}}
fault = exception.InstanceNotFound(instance_id='instance-0000')
def fake_reboot(*args, **kwargs):
reboot_call_info['args'] = args
reboot_call_info['kwargs'] = kwargs
# NOTE(sirp): Since `bad_volumes_callback` is a function defined
# within `reboot_instance`, we don't have access to its value and
# can't stub it out, thus we skip that comparison.
kwargs.pop('bad_volumes_callback')
if fail_reboot:
raise fault
self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
# Power state should be updated again
if not fail_reboot or fail_running:
new_power_state = fake_power_state2
self.compute._get_power_state(econtext,
instance).AndReturn(fake_power_state2)
else:
new_power_state = fake_power_state3
self.compute._get_power_state(econtext,
instance).AndReturn(fake_power_state3)
if test_delete:
fault = exception.InstanceNotFound(
instance_id=instance['uuid'])
db.instance_update_and_get_original(
econtext, updated_dbinstance1['uuid'],
{'power_state': new_power_state,
'task_state': None,
'vm_state': vm_states.ACTIVE},
update_cells=False,
columns_to_join=['system_metadata'],
).AndRaise(fault)
self.compute._notify_about_instance_usage(
econtext,
instance,
'reboot.end')
elif fail_reboot and not fail_running:
db.instance_update_and_get_original(
econtext, updated_dbinstance1['uuid'],
{'vm_state': vm_states.ERROR},
update_cells=False,
columns_to_join=['system_metadata'],
).AndRaise(fault)
else:
db.instance_update_and_get_original(
econtext, updated_dbinstance1['uuid'],
{'power_state': new_power_state,
'task_state': None,
'vm_state': vm_states.ACTIVE},
update_cells=False,
columns_to_join=['system_metadata'],
).AndReturn((None, updated_dbinstance2))
if fail_running:
self.compute._notify_about_instance_usage(econtext, instance,
'reboot.error', fault=fault)
self.compute._notify_about_instance_usage(
econtext,
instance,
'reboot.end')
self.mox.ReplayAll()
if not fail_reboot or fail_running:
self.compute.reboot_instance(self.context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
else:
self.assertRaises(exception.InstanceNotFound,
self.compute.reboot_instance,
self.context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
self.assertEqual(expected_call_info, reboot_call_info)
def test_reboot_soft(self):
self._test_reboot(True)
def test_reboot_soft_and_delete(self):
self._test_reboot(True, True)
def test_reboot_soft_and_rescued(self):
self._test_reboot(True, False, True)
def test_reboot_soft_and_delete_and_rescued(self):
self._test_reboot(True, True, True)
def test_reboot_hard(self):
self._test_reboot(False)
def test_reboot_hard_and_delete(self):
self._test_reboot(False, True)
def test_reboot_hard_and_rescued(self):
self._test_reboot(False, False, True)
def test_reboot_hard_and_delete_and_rescued(self):
self._test_reboot(False, True, True)
def test_reboot_fail(self):
self._test_reboot(False, fail_reboot=True)
def test_reboot_fail_running(self):
self._test_reboot(False, fail_reboot=True,
fail_running=True)
def test_get_instance_volume_block_device_info_source_image(self):
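        # An image-backed BDM looked up by instance uuid should be translated
        # into the legacy block_device_info format.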
bdms = block_device_obj.block_device_make_list(self.context,
[fake_block_device.FakeDbBlockDeviceDict({
'id': 3,
'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
'instance_uuid': 'fake-instance',
'device_name': '/dev/vda',
'connection_info': '{"driver_volume_type": "rbd"}',
'source_type': 'image',
'destination_type': 'volume',
'image_id': 'fake-image-id-1',
'boot_index': 0
})])
with (mock.patch.object(
block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid',
return_value=bdms)
) as mock_get_by_instance:
block_device_info = (
self.compute._get_instance_volume_block_device_info(
self.context, self._create_fake_instance())
)
expected = {
'block_device_mapping': [{
'connection_info': {
'driver_volume_type': 'rbd'
},
'mount_device': '/dev/vda',
'delete_on_termination': False
}]
}
self.assertTrue(mock_get_by_instance.called)
self.assertEqual(block_device_info, expected)
def test_get_instance_volume_block_device_info_passed_bdms(self):
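        # When BDMs are passed in explicitly, the per-instance DB lookup
        # should be skipped.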
bdms = block_device_obj.block_device_make_list(self.context,
[fake_block_device.FakeDbBlockDeviceDict({
'id': 3,
'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
'device_name': '/dev/vdd',
'connection_info': '{"driver_volume_type": "rbd"}',
'source_type': 'volume',
'destination_type': 'volume'})
])
with (mock.patch.object(
block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')) as mock_get_by_instance:
block_device_info = (
self.compute._get_instance_volume_block_device_info(
self.context, self._create_fake_instance(), bdms=bdms)
)
expected = {
'block_device_mapping': [{
'connection_info': {
'driver_volume_type': 'rbd'
},
'mount_device': '/dev/vdd',
'delete_on_termination': False
}]
}
self.assertFalse(mock_get_by_instance.called)
self.assertEqual(block_device_info, expected)
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.UPDATING_PASSWORD})
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
self.compute.set_admin_password(self.context, instance, None)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertIsNone(inst_ref['task_state'])
self.compute.terminate_instance(self.context,
self._objectify(inst_ref), [], [])
def test_set_admin_password_bad_state(self):
# Test setting password while instance is rebuilding.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'], {
"power_state": power_state.NOSTATE,
})
instance = jsonutils.to_primitive(db.instance_get_by_uuid(
self.context, instance['uuid']))
self.assertEqual(instance['power_state'], power_state.NOSTATE)
def fake_driver_get_info(self2, _instance):
return {'state': power_state.NOSTATE,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
self.stubs.Set(nova.virt.fake.FakeDriver, 'get_info',
fake_driver_get_info)
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.UPDATING_PASSWORD})
self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context,
instance, None)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
expected_task_state,
expected_exception):
"""Ensure expected exception is raised if set_admin_password fails."""
def fake_sleep(_time):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
def fake_driver_set_pass(self2, _instance, _pwd):
raise exc
self.stubs.Set(nova.virt.fake.FakeDriver, 'set_admin_password',
fake_driver_set_pass)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{'task_state': task_states.UPDATING_PASSWORD})
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
        # An error raised from the driver should not reveal internal
        # information, so a new error is raised instead.
self.assertRaises(expected_exception,
self.compute.set_admin_password,
self.context,
instance=jsonutils.to_primitive(inst_ref),
new_pass=None)
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(inst_ref['vm_state'], expected_vm_state)
self.assertEqual(inst_ref['task_state'], expected_task_state)
self.compute.terminate_instance(self.context,
self._objectify(inst_ref), [], [])
def test_set_admin_password_driver_not_authorized(self):
"""Ensure expected exception is raised if set_admin_password not
authorized.
"""
exc = exception.NotAuthorized(_('Internal error'))
expected_exception = exception.InstancePasswordSetFailed
self._do_test_set_admin_password_driver_error(exc,
vm_states.ERROR,
None,
expected_exception)
def test_set_admin_password_driver_not_implemented(self):
"""Ensure expected exception is raised if set_admin_password not
implemented by driver.
"""
exc = NotImplementedError()
expected_exception = NotImplementedError
self._do_test_set_admin_password_driver_error(exc,
vm_states.ACTIVE,
None,
expected_exception)
def test_inject_network_info(self):
# Ensure we can inject network info.
called = {'inject': False}
def fake_driver_inject_network(self, instance, network_info):
called['inject'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
fake_driver_inject_network)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
inst_obj = self._objectify(instance)
self.compute.inject_network_info(self.context, instance=inst_obj)
self.assertTrue(called['inject'])
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_reset_network(self):
# Ensure we can reset networking on an instance.
called = {'count': 0}
def fake_driver_reset_network(self, instance):
called['count'] += 1
self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
fake_driver_reset_network)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.compute.reset_network(self.context,
instance=self._objectify(instance))
self.assertEqual(called['count'], 1)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def _get_snapshotting_instance(self):
        # Create an instance and move it into the IMAGE_SNAPSHOT_PENDING
        # task state.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
instance = db.instance_update(
self.context, instance['uuid'],
{"task_state": task_states.IMAGE_SNAPSHOT_PENDING})
return self._objectify(instance)
def test_snapshot(self):
inst_obj = self._get_snapshotting_instance()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def test_snapshot_no_image(self):
inst_obj = self._get_snapshotting_instance()
inst_obj.image_ref = ''
inst_obj.save()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def _test_snapshot_fails(self, raise_during_cleanup):
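        # A driver snapshot failure should delete the uploaded image; cleanup
        # errors are tolerated when raise_during_cleanup is set.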
def fake_snapshot(*args, **kwargs):
raise test.TestingException()
self.fake_image_delete_called = False
def fake_delete(self_, context, image_id):
self.fake_image_delete_called = True
if raise_during_cleanup:
raise Exception()
self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
inst_obj = self._get_snapshotting_instance()
self.assertRaises(test.TestingException,
self.compute.snapshot_instance,
self.context, image_id='fakesnap',
instance=inst_obj)
self.assertTrue(self.fake_image_delete_called)
self._assert_state({'task_state': None})
def test_snapshot_fails(self):
self._test_snapshot_fails(False)
def test_snapshot_fails_cleanup_ignores_exception(self):
self._test_snapshot_fails(True)
def test_snapshot_fails_with_glance_error(self):
def fake_snapshot(*args, **kwargs):
raise exception.ImageNotFound(image_id='xxx')
self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
fake_image.stub_out_image_service(self.stubs)
inst_obj = self._get_snapshotting_instance()
self.compute.snapshot_instance(
self.context, image_id='fakesnap',
instance=inst_obj)
self._assert_state({'task_state': None})
def test_snapshot_handles_cases_when_instance_is_deleted(self):
inst_obj = self._get_snapshotting_instance()
inst_obj.task_state = task_states.DELETING
inst_obj.save()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def test_snapshot_handles_cases_when_instance_is_not_found(self):
inst_obj = self._get_snapshotting_instance()
inst_obj2 = instance_obj.Instance.get_by_uuid(self.context,
inst_obj.uuid)
inst_obj2.destroy()
self.compute.snapshot_instance(self.context, image_id='fakesnap',
instance=inst_obj)
def _assert_state(self, state_dict):
"""Assert state of VM is equal to state passed as parameter."""
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 1)
if 'vm_state' in state_dict:
self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
if 'task_state' in state_dict:
self.assertEqual(state_dict['task_state'],
instances[0]['task_state'])
if 'power_state' in state_dict:
self.assertEqual(state_dict['power_state'],
instances[0]['power_state'])
def test_console_output(self):
# Make sure we can get console output from instance.
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
output = self.compute.get_console_output(self.context,
instance=instance, tail_length=None)
self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance, [], [])
def test_console_output_tail(self):
# Make sure we can get console output from instance.
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
output = self.compute.get_console_output(self.context,
instance=instance, tail_length=2)
self.assertEqual(output, 'ANOTHER\nLAST LINE')
self.compute.terminate_instance(self.context, instance, [], [])
def test_console_output_not_implemented(self):
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
self.stubs.Set(self.compute.driver, 'get_console_output',
fake_not_implemented)
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_console_output, self.context,
instance, 0)
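        # Wrapping the manager in ExceptionHelper unwraps
        # messaging.ExpectedException so the underlying NotImplementedError
        # can be asserted directly.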
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.get_console_output, self.context,
instance, 0)
self.compute.terminate_instance(self.context, instance, [], [])
def test_novnc_vnc_console(self):
        # Make sure we can get a vnc console for an instance.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
# Try with the full instance
console = self.compute.get_vnc_console(self.context, 'novnc',
instance=instance)
self.assertTrue(console)
self.compute.terminate_instance(self.context, instance, [], [])
def test_validate_console_port_vnc(self):
self.flags(vnc_enabled=True)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance_obj()
def fake_driver_get_console(*args, **kwargs):
return {'host': "fake_host", 'port': "5900",
'internal_access_path': None}
self.stubs.Set(self.compute.driver, "get_vnc_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(
context=self.context, instance=instance, port="5900",
console_type="novnc"))
def test_validate_console_port_spice(self):
self.flags(vnc_enabled=True)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance_obj()
def fake_driver_get_console(*args, **kwargs):
return {'host': "fake_host", 'port': "5900",
'internal_access_path': None}
self.stubs.Set(self.compute.driver, "get_spice_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(
context=self.context, instance=instance, port="5900",
console_type="spice-html5"))
def test_validate_console_port_rdp(self):
self.flags(enabled=True, group='rdp')
instance = self._create_fake_instance_obj()
def fake_driver_get_console(*args, **kwargs):
return {'host': "fake_host", 'port': "5900",
'internal_access_path': None}
self.stubs.Set(self.compute.driver, "get_rdp_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(
context=self.context, instance=instance, port="5900",
console_type="rdp-html5"))
def test_validate_console_port_wrong_port(self):
self.flags(vnc_enabled=True)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance_obj()
def fake_driver_get_console(*args, **kwargs):
return {'host': "fake_host", 'port': "5900",
'internal_access_path': None}
self.stubs.Set(self.compute.driver, "get_vnc_console",
fake_driver_get_console)
self.assertFalse(self.compute.validate_console_port(
context=self.context, instance=instance, port="wrongport",
console_type="spice-html5"))
def test_xvpvnc_vnc_console(self):
        # Make sure we can get a vnc console for an instance.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
console = self.compute.get_vnc_console(self.context, 'xvpvnc',
instance=instance)
self.assertTrue(console)
self.compute.terminate_instance(self.context, instance, [], [])
def test_invalid_vnc_console_type(self):
# Raise useful error if console type is an unrecognised string.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_vnc_console,
self.context, 'invalid', instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, 'invalid', instance=instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_missing_vnc_console_type(self):
        # Raise useful error if console type is None.
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_vnc_console,
self.context, None, instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, None, instance=instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_get_vnc_console_not_implemented(self):
self.stubs.Set(self.compute.driver, 'get_vnc_console',
fake_not_implemented)
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_vnc_console,
self.context, 'novnc', instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.get_vnc_console,
self.context, 'novnc', instance=instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_spicehtml5_spice_console(self):
        # Make sure we can get a spice console for an instance.
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
# Try with the full instance
console = self.compute.get_spice_console(self.context, 'spice-html5',
instance=instance)
self.assertTrue(console)
self.compute.terminate_instance(self.context, instance, [], [])
def test_invalid_spice_console_type(self):
# Raise useful error if console type is an unrecognised string
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_spice_console,
self.context, 'invalid', instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_spice_console,
self.context, 'invalid', instance=instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_missing_spice_console_type(self):
        # Raise useful error if console type is None.
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_spice_console,
self.context, None, instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_spice_console,
self.context, None, instance=instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_rdphtml5_rdp_console(self):
        # Make sure we can get an rdp console for an instance.
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='rdp')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
# Try with the full instance
console = self.compute.get_rdp_console(self.context, 'rdp-html5',
instance=instance)
self.assertTrue(console)
self.compute.terminate_instance(self.context, instance, [], [])
def test_invalid_rdp_console_type(self):
# Raise useful error if console type is an unrecognised string
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='rdp')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_rdp_console,
self.context, 'invalid', instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_rdp_console,
self.context, 'invalid', instance=instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_missing_rdp_console_type(self):
        # Raise useful error if console type is None.
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='rdp')
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
self.assertRaises(messaging.ExpectedException,
self.compute.get_rdp_console,
self.context, None, instance=instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_rdp_console,
self.context, None, instance=instance)
self.compute.terminate_instance(self.context, instance, [], [])
def test_vnc_console_instance_not_ready(self):
self.flags(vnc_enabled=True)
self.flags(enabled=False, group='spice')
instance = self._create_fake_instance_obj(
params={'vm_state': vm_states.BUILDING})
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.stubs.Set(self.compute.driver, "get_vnc_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.InstanceNotReady,
self.compute.get_vnc_console, self.context, 'novnc',
instance=instance)
def test_spice_console_instance_not_ready(self):
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='spice')
instance = self._create_fake_instance_obj(
params={'vm_state': vm_states.BUILDING})
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.stubs.Set(self.compute.driver, "get_spice_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.InstanceNotReady,
self.compute.get_spice_console, self.context, 'spice-html5',
instance=instance)
def test_rdp_console_instance_not_ready(self):
self.flags(vnc_enabled=False)
self.flags(enabled=True, group='rdp')
instance = self._create_fake_instance_obj(
params={'vm_state': vm_states.BUILDING})
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.stubs.Set(self.compute.driver, "get_rdp_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(exception.InstanceNotReady,
self.compute.get_rdp_console, self.context, 'rdp-html5',
instance=instance)
def test_diagnostics(self):
# Make sure we can get diagnostics for an instance.
expected_diagnostic = {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, [], None,
None, True, None, False)
diagnostics = self.compute.get_diagnostics(self.context,
instance=instance)
self.assertEqual(diagnostics, expected_diagnostic)
self.compute.terminate_instance(self.context, instance, [], [])
def test_add_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
pass
self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'inject_network_info', dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
instance=self._objectify(instance))
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_remove_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
pass
self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'inject_network_info', dummy)
self.stubs.Set(nova.compute.manager.ComputeManager,
'reset_network', dummy)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.compute.remove_fixed_ip_from_instance(self.context, 1,
instance=instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_run_instance_usage_notification(self, request_spec={}):
# Ensure run instance generates appropriate usage notification.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
expected_image_name = request_spec.get('image', {}).get('name', '')
self.compute.run_instance(self.context, instance, request_spec,
{}, [], None, None, True, None, False)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type, 'compute.instance.create.start')
# The last event is the one with the sugar in it.
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.create.end')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(expected_image_name, payload['image_name'])
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], inst_ref['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
self.assertEqual(payload['state'], 'active')
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
self.assertIn('fixed_ips', payload)
self.assertTrue(payload['launched_at'])
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.assertEqual('Success', payload['message'])
self.compute.terminate_instance(self.context,
self._objectify(inst_ref), [], [])
def test_run_instance_image_usage_notification(self):
request_spec = {'image': {'name': 'fake_name', 'key': 'value'}}
self.test_run_instance_usage_notification(request_spec=request_spec)
def test_run_instance_usage_notification_volume_meta(self):
# Volume's image metadata won't contain the image name
request_spec = {'image': {'key': 'value'}}
self.test_run_instance_usage_notification(request_spec=request_spec)
def test_run_instance_end_notification_on_abort(self):
        # Test that an end notification is sent if the build is aborted.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
def build_inst_abort(*args, **kwargs):
raise exception.BuildAbortException(reason="already deleted",
instance_uuid=instance_uuid)
self.stubs.Set(self.compute, '_build_instance', build_inst_abort)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type, 'compute.instance.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type, 'compute.instance.create.end')
self.assertEqual('INFO', msg.priority)
payload = msg.payload
message = payload['message']
self.assertTrue(message.find("already deleted") != -1)
def test_run_instance_error_notification_on_reschedule(self):
        # Test that an error notification is sent if the build gets
        # rescheduled.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
def build_inst_fail(*args, **kwargs):
raise exception.RescheduledException(instance_uuid=instance_uuid,
reason="something bad happened")
self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type, 'compute.instance.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type, 'compute.instance.create.error')
self.assertEqual('ERROR', msg.priority)
payload = msg.payload
message = payload['message']
self.assertTrue(message.find("something bad happened") != -1)
def test_run_instance_error_notification_on_failure(self):
        # Test that an error notification is sent if the build fails hard.
instance = jsonutils.to_primitive(self._create_fake_instance())
def build_inst_fail(*args, **kwargs):
raise test.TestingException("i'm dying")
self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
self.assertRaises(test.TestingException, self.compute.run_instance,
self.context, instance, {}, {}, [], None, None, True, None,
False)
self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type, 'compute.instance.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type, 'compute.instance.create.error')
self.assertEqual('ERROR', msg.priority)
payload = msg.payload
message = payload['message']
self.assertTrue(message.find("i'm dying") != -1)
def test_terminate_usage_notification(self):
# Ensure terminate_instance generates correct usage notification.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
fake_notifier.NOTIFICATIONS = []
timeutils.set_time_override(cur_time)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.priority, 'INFO')
self.assertEqual(msg.event_type, 'compute.instance.delete.start')
msg1 = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start')
msg1 = fake_notifier.NOTIFICATIONS[2]
self.assertEqual(msg1.event_type, 'compute.instance.shutdown.end')
msg1 = fake_notifier.NOTIFICATIONS[3]
self.assertEqual(msg1.event_type, 'compute.instance.delete.end')
payload = msg1.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
self.assertIn('terminated_at', payload)
self.assertIn('deleted_at', payload)
self.assertEqual(payload['terminated_at'], timeutils.strtime(cur_time))
self.assertEqual(payload['deleted_at'], timeutils.strtime(cur_time))
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
# Ensure failure when running an instance that already exists.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, [], None,
None, True, None, False)
self.assertRaises(exception.InstanceExists,
self.compute.run_instance,
self.context, instance, {}, {}, [], None, None, True,
None, False)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_run_instance_queries_macs(self):
# run_instance should ask the driver for node mac addresses and pass
# that to the network_api in use.
fake_network.unset_stub_network_methods(self.stubs)
instance = jsonutils.to_primitive(self._create_fake_instance())
macs = set(['01:23:45:67:89:ab'])
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
self.compute.network_api.allocate_for_instance(
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
vpn=False, macs=macs,
security_groups=[], dhcp_options=None).AndReturn(
fake_network.fake_get_instance_nw_info(self.stubs, 1, 1))
self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
self.compute.driver.macs_for_instance(instance).AndReturn(macs)
self.mox.ReplayAll()
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
def _create_server_group(self):
group_instance = self._create_fake_instance_obj(
params=dict(host=self.compute.host))
instance_group = instance_group_obj.InstanceGroup(self.context)
instance_group.user_id = self.user_id
instance_group.project_id = self.project_id
instance_group.name = 'messi'
instance_group.uuid = str(uuid.uuid4())
instance_group.members = [group_instance.uuid]
instance_group.policies = ['anti-affinity']
instance_group.create()
return instance_group
def _run_instance_reschedules_on_anti_affinity_violation(self, group,
hint):
instance = jsonutils.to_primitive(self._create_fake_instance())
filter_properties = {'scheduler_hints': {'group': hint}}
self.assertRaises(exception.RescheduledException,
self.compute._build_instance,
self.context, {}, filter_properties,
[], None, None, True, None, instance,
None, False)
def test_run_instance_reschedules_on_anti_affinity_violation_by_name(self):
group = self._create_server_group()
self._run_instance_reschedules_on_anti_affinity_violation(group,
group.name)
def test_run_instance_reschedules_on_anti_affinity_violation_by_uuid(self):
group = self._create_server_group()
self._run_instance_reschedules_on_anti_affinity_violation(group,
group.uuid)
def test_instance_set_to_error_on_uncaught_exception(self):
# Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
self.mox.StubOutWithMock(self.compute.network_api,
"deallocate_for_instance")
self.compute.network_api.allocate_for_instance(
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
vpn=False, macs=None,
security_groups=[], dhcp_options=None
).AndRaise(messaging.RemoteError())
self.compute.network_api.deallocate_for_instance(
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None).MultipleTimes()
fake_network.unset_stub_network_methods(self.stubs)
self.mox.ReplayAll()
self.assertRaises(messaging.RemoteError,
self.compute.run_instance,
self.context, instance, {}, {}, None, None, None,
True, None, False)
instance = db.instance_get_by_uuid(context.get_admin_context(),
instance['uuid'])
self.assertEqual(vm_states.ERROR, instance['vm_state'])
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_delete_instance_succedes_on_volume_fail(self):
instance = self._create_fake_instance_obj()
def fake_cleanup_volumes(context, instance):
raise test.TestingException()
self.stubs.Set(self.compute, '_cleanup_volumes',
fake_cleanup_volumes)
self.compute._delete_instance(self.context, instance, [],
self.none_quotas)
def test_delete_instance_keeps_net_on_power_off_fail(self):
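        # A power-off failure during driver destroy should leave the network
        # allocated.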
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
exp = exception.InstancePowerOffFailure(reason='')
self.compute.driver.destroy(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(exp)
# mox will detect if _deallocate_network gets called unexpectedly
self.mox.ReplayAll()
instance = self._create_fake_instance_obj()
self.assertRaises(exception.InstancePowerOffFailure,
self.compute._delete_instance,
self.context,
instance,
[],
self.none_quotas)
def test_delete_instance_loses_net_on_other_fail(self):
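        # Any other destroy failure should still deallocate the network.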
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
exp = test.TestingException()
self.compute.driver.destroy(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(exp)
self.compute._deallocate_network(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
instance = self._create_fake_instance_obj()
self.assertRaises(test.TestingException,
self.compute._delete_instance,
self.context,
instance,
[],
self.none_quotas)
def test_delete_instance_deletes_console_auth_tokens(self):
instance = self._create_fake_instance_obj()
self.flags(vnc_enabled=True)
self.tokens_deleted = False
def fake_delete_tokens(*args, **kwargs):
self.tokens_deleted = True
cauth_rpcapi = self.compute.consoleauth_rpcapi
self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
fake_delete_tokens)
self.compute._delete_instance(self.context, instance, [],
self.none_quotas)
self.assertTrue(self.tokens_deleted)
def test_delete_instance_deletes_console_auth_tokens_cells(self):
instance = self._create_fake_instance_obj()
self.flags(vnc_enabled=True)
self.flags(enable=True, group='cells')
self.tokens_deleted = False
def fake_delete_tokens(*args, **kwargs):
self.tokens_deleted = True
cells_rpcapi = self.compute.cells_rpcapi
self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
fake_delete_tokens)
self.compute._delete_instance(self.context, instance,
[], self.none_quotas)
self.assertTrue(self.tokens_deleted)
def test_instance_termination_exception_sets_error(self):
"""Test that we handle InstanceTerminationFailure
which is propagated up from the underlying driver.
"""
instance = self._create_fake_instance_obj()
def fake_delete_instance(context, instance, bdms,
reservations=None):
raise exception.InstanceTerminationFailure(reason='')
self.stubs.Set(self.compute, '_delete_instance',
fake_delete_instance)
self.assertRaises(exception.InstanceTerminationFailure,
self.compute.terminate_instance,
self.context,
self._objectify(instance), [], [])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_network_is_deallocated_on_spawn_failure(self):
# When a spawn fails the network must be deallocated.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute, "_prep_block_device")
self.compute._prep_block_device(
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(messaging.RemoteError('', '', ''))
self.mox.ReplayAll()
self.assertRaises(messaging.RemoteError,
self.compute.run_instance,
self.context, instance, {}, {}, None, None, None,
True, None, False)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_lock(self):
# FIXME(comstud): This test is such crap. This is testing
# compute API lock functionality in a test class for the compute
# manager by running an instance. Hello? We should just have
# unit tests in test_compute_api that test the check_instance_lock
# decorator and make sure that appropriate compute_api methods
# have the decorator.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
non_admin_context = context.RequestContext(None,
None,
is_admin=False)
def check_task_state(task_state):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_state)
# should fail with locked nonadmin context
inst_obj = instance_obj.Instance.get_by_uuid(self.context,
instance['uuid'])
self.compute_api.lock(self.context, inst_obj)
self.assertRaises(exception.InstanceIsLocked,
self.compute_api.reboot,
non_admin_context, inst_obj, 'SOFT')
check_task_state(None)
# should fail with invalid task state
self.compute_api.unlock(self.context, inst_obj)
instance = db.instance_update(self.context, instance_uuid,
{'task_state': task_states.REBOOTING})
inst_obj.refresh()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
non_admin_context, inst_obj, 'SOFT')
check_task_state(task_states.REBOOTING)
# should succeed with admin context
instance = db.instance_update(self.context, instance_uuid,
{'task_state': None})
inst_obj.refresh()
self.compute_api.reboot(self.context, inst_obj, 'SOFT')
check_task_state(task_states.REBOOTING)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def _check_locked_by(self, instance_uuid, locked_by):
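        # Helper: assert the instance's locked/locked_by values in the DB
        # and return the refreshed instance record.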
instance = db.instance_get_by_uuid(self.context, instance_uuid)
        self.assertEqual(instance['locked'], locked_by is not None)
self.assertEqual(instance['locked_by'], locked_by)
return instance
def test_override_owner_lock(self):
# FIXME(comstud): This test is such crap. This is testing
# compute API lock functionality in a test class for the compute
# manager by running an instance. Hello? We should just have
# unit tests in test_compute_api that test the check_instance_lock
# decorator and make sure that appropriate compute_api methods
# have the decorator.
admin_context = context.RequestContext('admin-user',
'admin-project',
is_admin=True)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
# Ensure that an admin can override the owner lock
inst_obj = self._objectify(instance)
self.compute_api.lock(self.context, inst_obj)
self._check_locked_by(instance_uuid, 'owner')
self.compute_api.unlock(admin_context, inst_obj)
self._check_locked_by(instance_uuid, None)
def test_upgrade_owner_lock(self):
# FIXME(comstud): This test is such crap. This is testing
# compute API lock functionality in a test class for the compute
# manager by running an instance. Hello? We should just have
# unit tests in test_compute_api that test the check_instance_lock
# decorator and make sure that appropriate compute_api methods
# have the decorator.
admin_context = context.RequestContext('admin-user',
'admin-project',
is_admin=True)
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
# Ensure that an admin can upgrade the lock and that
# the owner can no longer unlock
inst_obj = self._objectify(instance)
self.compute_api.lock(self.context, inst_obj)
self.compute_api.lock(admin_context, inst_obj)
self._check_locked_by(instance_uuid, 'admin')
inst_obj.refresh()
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.unlock,
self.context, inst_obj)
self._check_locked_by(instance_uuid, 'admin')
self.compute_api.unlock(admin_context, inst_obj)
self._check_locked_by(instance_uuid, None)
def _test_state_revert(self, instance, operation, pre_task_state,
kwargs=None):
if kwargs is None:
kwargs = {}
# The API would have set task_state, so do that here to test
# that the state gets reverted on failure
db.instance_update(self.context, instance['uuid'],
{"task_state": pre_task_state})
orig_elevated = self.context.elevated
orig_notify = self.compute._notify_about_instance_usage
def _get_an_exception(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.context, 'elevated', _get_an_exception)
self.stubs.Set(self.compute,
'_notify_about_instance_usage', _get_an_exception)
func = getattr(self.compute, operation)
self.assertRaises(test.TestingException,
func, self.context, instance=instance, **kwargs)
# self.context.elevated() is called in tearDown()
self.stubs.Set(self.context, 'elevated', orig_elevated)
self.stubs.Set(self.compute,
'_notify_about_instance_usage', orig_notify)
# Fetch the instance's task_state and make sure it reverted to None.
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertIsNone(instance["task_state"])
def test_state_revert(self):
# ensure that task_state is reverted after a failed operation.
migration = migration_obj.Migration()
migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
migration.new_instance_type_id = '1'
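        # Each action is (manager method name, task_state set before the
        # call, optional kwargs for the call).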
actions = [
("reboot_instance", task_states.REBOOTING,
{'block_device_info': [],
'reboot_type': 'SOFT'}),
("stop_instance", task_states.POWERING_OFF),
("start_instance", task_states.POWERING_ON),
("soft_delete_instance", task_states.SOFT_DELETING,
{'reservations': []}),
("restore_instance", task_states.RESTORING),
("rebuild_instance", task_states.REBUILDING,
{'orig_image_ref': None,
'image_ref': None,
'injected_files': [],
'new_pass': '',
'orig_sys_metadata': {},
'bdms': [],
'recreate': False,
'on_shared_storage': False}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
{'rescue_password': None}),
("unrescue_instance", task_states.UNRESCUING),
("revert_resize", task_states.RESIZE_REVERTING,
{'migration': migration,
'reservations': []}),
("prep_resize", task_states.RESIZE_PREP,
{'image': {},
'instance_type': {},
'reservations': [],
'request_spec': {},
'filter_properties': {},
'node': None}),
("resize_instance", task_states.RESIZE_PREP,
{'migration': migration,
'image': {},
'reservations': [],
'instance_type': {}}),
("pause_instance", task_states.PAUSING),
("unpause_instance", task_states.UNPAUSING),
("suspend_instance", task_states.SUSPENDING),
("resume_instance", task_states.RESUMING),
]
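        # Methods listed in want_objects are passed an Instance object in
        # the loop below; the rest still receive the dict primitive.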
want_objects = ['stop_instance', 'start_instance',
'terminate_instance', 'soft_delete_instance',
'revert_resize', 'confirm_resize'
]
self._stub_out_resize_network_methods()
instance = self._create_fake_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
for operation in actions:
if operation[0] in want_objects:
self._test_state_revert(inst_obj, *operation)
else:
self._test_state_revert(instance, *operation)
def _ensure_quota_reservations_committed(self, instance):
"""Mock up commit of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations,
project_id=instance['project_id'],
user_id=instance['user_id'])
self.mox.ReplayAll()
return reservations
def _ensure_quota_reservations_rolledback(self, instance):
"""Mock up rollback of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations,
project_id=instance['project_id'],
user_id=instance['user_id'])
self.mox.ReplayAll()
return reservations
def test_quotas_successful_delete(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
resvs = self._ensure_quota_reservations_committed(instance)
self.compute.terminate_instance(self.context,
self._objectify(instance), bdms=[], reservations=resvs)
def test_quotas_failed_delete(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
def fake_shutdown_instance(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute, '_shutdown_instance',
fake_shutdown_instance)
resvs = self._ensure_quota_reservations_rolledback(instance)
self.assertRaises(test.TestingException,
self.compute.terminate_instance,
self.context, self._objectify(instance),
bdms=[], reservations=resvs)
def test_quotas_successful_soft_delete(self):
instance = self._objectify(self._create_fake_instance(
params=dict(task_state=task_states.SOFT_DELETING)))
resvs = self._ensure_quota_reservations_committed(instance)
self.compute.soft_delete_instance(self.context, instance,
reservations=resvs)
def test_quotas_failed_soft_delete(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
params=dict(task_state=task_states.SOFT_DELETING)))
def fake_soft_delete(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'soft_delete',
fake_soft_delete)
resvs = self._ensure_quota_reservations_rolledback(instance)
self.assertRaises(test.TestingException,
self.compute.soft_delete_instance,
self.context, instance,
reservations=resvs)
def test_quotas_destroy_of_soft_deleted_instance(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
params=dict(vm_state=vm_states.SOFT_DELETED)))
        # Termination should be successful, but the quota reservations are
        # rolled back because the instance was in SOFT_DELETED state.
resvs = self._ensure_quota_reservations_rolledback(instance)
self.compute.terminate_instance(self.context,
self._objectify(instance), bdms=[], reservations=resvs)
def _stub_out_resize_network_methods(self):
def fake(cls, ctxt, instance, *args, **kwargs):
pass
self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
self.stubs.Set(network_api.API, 'migrate_instance_start', fake)
self.stubs.Set(network_api.API, 'migrate_instance_finish', fake)
def _test_finish_resize(self, power_on):
# Contrived test to ensure finish_resize doesn't raise anything and
# also tests resize from ACTIVE or STOPPED state which determines
# if the resized instance is powered on or not.
vm_state = None
if power_on:
vm_state = vm_states.ACTIVE
else:
vm_state = vm_states.STOPPED
params = {'vm_state': vm_state}
instance = self._create_fake_instance_obj(params)
image = 'fake-image'
disk_info = 'fake-disk-info'
instance_type = flavors.get_default_flavor()
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, reservations=[], request_spec={},
filter_properties={}, node=None)
instance.task_state = task_states.RESIZE_MIGRATED
instance.save()
# NOTE(mriedem): make sure prep_resize set old_vm_state correctly
sys_meta = instance.system_metadata
self.assertIn('old_vm_state', sys_meta)
if power_on:
self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
else:
self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state'])
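        # prep_resize should have created a migration record in the
        # 'pre-migrating' status; fetch it for the rest of the resize flow.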
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
orig_mig_save = migration.save
orig_inst_save = instance.save
network_api = self.compute.network_api
conductor_api = self.compute.conductor_api
self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
self.mox.StubOutWithMock(network_api,
'migrate_instance_finish')
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'finish_migration')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(migration, 'save')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.context, 'elevated')
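        # The side-effect helpers below assert on the intermediate
        # migration and instance state at each save() call that
        # finish_resize makes.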
def _mig_save(context):
self.assertEqual(migration.status, 'finished')
self.assertEqual(vm_state, instance.vm_state)
self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
orig_mig_save()
def _instance_save1():
self.assertEqual(instance_type['id'],
instance.instance_type_id)
orig_inst_save()
def _instance_save2(expected_task_state=None):
self.assertEqual(task_states.RESIZE_MIGRATED,
expected_task_state)
self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
orig_inst_save(expected_task_state=expected_task_state)
def _instance_save3(expected_task_state=None):
self.assertEqual(task_states.RESIZE_FINISH,
expected_task_state)
self.assertEqual(vm_states.RESIZED, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIn('launched_at', instance.obj_what_changed())
orig_inst_save(expected_task_state=expected_task_state)
# First save to update flavor
instance.save().WithSideEffects(_instance_save1)
network_api.setup_networks_on_host(self.context, instance,
'fake-mini')
network_api.migrate_instance_finish(self.context,
mox.IsA(dict),
mox.IsA(dict))
self.compute._get_instance_nw_info(
self.context, instance).AndReturn('fake-nwinfo1')
# 2nd save to update task state
exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED)
instance.save(**exp_kwargs).WithSideEffects(_instance_save2)
self.compute._notify_about_instance_usage(
self.context, instance, 'finish_resize.start',
network_info='fake-nwinfo1')
self.compute._get_instance_volume_block_device_info(
self.context, instance,
refresh_conn_info=True).AndReturn('fake-bdminfo')
        # nova.conf sets the default flavor to m1.small and the test sets
        # the default flavor to m1.tiny, so they should be different, which
        # makes this a resize.
self.compute.driver.finish_migration(self.context, migration,
instance, disk_info,
'fake-nwinfo1',
image, True,
'fake-bdminfo', power_on)
        # Ensure the instance status updates happen after the migration
        # finishes.
self.context.elevated().AndReturn(self.context)
migration.save(self.context).WithSideEffects(_mig_save)
exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH)
instance.save(**exp_kwargs).WithSideEffects(_instance_save3)
self.compute._notify_about_instance_usage(
self.context, instance, 'finish_resize.end',
network_info='fake-nwinfo1')
# NOTE(comstud): This actually does the mox.ReplayAll()
reservations = self._ensure_quota_reservations_committed(instance)
self.compute.finish_resize(self.context,
migration=migration,
disk_info=disk_info, image=image, instance=instance,
reservations=reservations)
def test_finish_resize_from_active(self):
self._test_finish_resize(power_on=True)
def test_finish_resize_from_stopped(self):
self._test_finish_resize(power_on=False)
def test_finish_resize_with_volumes(self):
"""Contrived test to ensure finish_resize doesn't raise anything."""
# create instance
instance = self._create_fake_instance_obj()
# create volume
volume_id = 'fake'
volume = {'instance_uuid': None,
'device_name': None,
'id': volume_id}
bdm = block_device_obj.BlockDeviceMapping(
**{'source_type': 'volume',
'destination_type': 'volume',
'volume_id': volume_id,
'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc'})
bdm.create(self.context)
# stub out volume attach
def fake_volume_get(self, context, volume_id):
return volume
self.stubs.Set(cinder.API, "get", fake_volume_get)
def fake_volume_check_attach(self, context, volume_id, instance):
pass
self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach)
def fake_get_volume_encryption_metadata(self, context, volume_id):
return {}
self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
fake_get_volume_encryption_metadata)
orig_connection_data = {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
'target_portal': '127.0.0.0.1:3260',
'volume_id': volume_id,
}
connection_info = {
'driver_volume_type': 'iscsi',
'data': orig_connection_data,
}
def fake_init_conn(self, context, volume_id, session):
return connection_info
self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
def fake_attach(self, context, volume_id, instance_uuid, device_name):
volume['instance_uuid'] = instance_uuid
volume['device_name'] = device_name
self.stubs.Set(cinder.API, "attach", fake_attach)
# stub out virt driver attach
def fake_get_volume_connector(*args, **kwargs):
return {}
self.stubs.Set(self.compute.driver, 'get_volume_connector',
fake_get_volume_connector)
def fake_attach_volume(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'attach_volume',
fake_attach_volume)
# attach volume to instance
instance_p = obj_base.obj_to_primitive(instance)
self.compute.attach_volume(self.context, volume['id'],
'/dev/vdc', instance_p, bdm=bdm)
# assert volume attached correctly
self.assertEqual(volume['device_name'], '/dev/vdc')
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance.uuid)
self.assertEqual(len(disk_info), 1)
for bdm in disk_info:
self.assertEqual(bdm['device_name'], volume['device_name'])
self.assertEqual(bdm['connection_info'],
jsonutils.dumps(connection_info))
# begin resize
instance_type = flavors.get_default_flavor()
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, reservations=[], request_spec={},
filter_properties={}, node=None)
# fake out detach for prep_resize (and later terminate)
def fake_terminate_connection(self, context, volume, connector):
connection_info['data'] = None
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)
self._stub_out_resize_network_methods()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={}, reservations=[],
instance_type=jsonutils.to_primitive(instance_type))
# assert bdm is unchanged
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance.uuid)
self.assertEqual(len(disk_info), 1)
for bdm in disk_info:
self.assertEqual(bdm['device_name'], volume['device_name'])
cached_connection_info = jsonutils.loads(bdm['connection_info'])
self.assertEqual(cached_connection_info['data'],
orig_connection_data)
# but connection was terminated
self.assertIsNone(connection_info['data'])
# stub out virt driver finish_migration
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
instance.task_state = task_states.RESIZE_MIGRATED
instance.save()
reservations = self._ensure_quota_reservations_committed(instance)
# new initialize connection
new_connection_data = dict(orig_connection_data)
        new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id
        new_connection_data['target_iqn'] = new_iqn
def fake_init_conn_with_data(self, context, volume, session):
connection_info['data'] = new_connection_data
return connection_info
self.stubs.Set(cinder.API, "initialize_connection",
fake_init_conn_with_data)
self.compute.finish_resize(self.context,
migration=migration,
disk_info={}, image={}, instance=instance,
reservations=reservations)
# assert volume attached correctly
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])
self.assertEqual(len(disk_info), 1)
for bdm in disk_info:
self.assertEqual(bdm['connection_info'],
jsonutils.dumps(connection_info))
# stub out detach
def fake_detach(self, context, volume_uuid):
volume['device_path'] = None
volume['instance_uuid'] = None
self.stubs.Set(cinder.API, "detach", fake_detach)
# clean up
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_finish_resize_handles_error(self):
# Make sure we don't leave the instance in RESIZE on error.
def throw_up(*args, **kwargs):
raise test.TestingException()
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)
self._stub_out_resize_network_methods()
instance = self._create_fake_instance_obj()
reservations = self._ensure_quota_reservations_rolledback(instance)
instance_type = flavors.get_default_flavor()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, reservations=reservations,
request_spec={}, filter_properties={},
node=None)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
instance.refresh()
instance.task_state = task_states.RESIZE_MIGRATED
instance.save()
self.assertRaises(test.TestingException, self.compute.finish_resize,
self.context,
migration=migration,
disk_info={}, image={}, instance=instance,
reservations=reservations)
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_rebuild_instance_notification(self):
        # Ensure notifications on instance rebuild.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
inst_ref = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, inst_ref, {}, {}, None, None,
None, True, None, False)
timeutils.set_time_override(cur_time)
fake_notifier.NOTIFICATIONS = []
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
orig_sys_metadata = db.instance_system_metadata_get(self.context,
inst_ref['uuid'])
image_ref = instance["image_ref"]
new_image_ref = image_ref + '-new_image_ref'
db.instance_update(self.context, inst_ref['uuid'],
{'image_ref': new_image_ref})
password = "new_password"
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.REBUILDING})
self.compute.rebuild_instance(self.context,
self._objectify(instance),
image_ref, new_image_ref,
injected_files=[],
new_pass=password,
orig_sys_metadata=orig_sys_metadata,
bdms=[], recreate=False,
on_shared_storage=False)
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
image_ref_url = glance.generate_image_url(image_ref)
new_image_ref_url = glance.generate_image_url(new_image_ref)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.exists')
self.assertEqual(msg.payload['image_ref_url'], image_ref_url)
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.rebuild.start')
self.assertEqual(msg.payload['image_ref_url'], new_image_ref_url)
self.assertEqual(msg.payload['image_name'], 'fake_name')
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEqual(msg.event_type,
'compute.instance.rebuild.end')
self.assertEqual(msg.priority, 'INFO')
payload = msg.payload
self.assertEqual(payload['image_name'], 'fake_name')
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], inst_ref['uuid'])
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
self.assertEqual(payload['image_ref_url'], new_image_ref_url)
self.compute.terminate_instance(self.context,
self._objectify(inst_ref), [], [])
def test_finish_resize_instance_notification(self):
# Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = self._create_fake_instance_obj()
new_type = flavors.get_flavor_by_name('m1.small')
new_type = jsonutils.to_primitive(new_type)
new_type_id = new_type['id']
flavor_id = new_type['flavorid']
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.host = 'foo'
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=new_type, image={}, reservations=[],
request_spec={}, filter_properties={}, node=None)
self._stub_out_resize_network_methods()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={}, instance_type=new_type,
reservations=[])
timeutils.set_time_override(cur_time)
fake_notifier.NOTIFICATIONS = []
self.compute.finish_resize(self.context,
migration=migration, reservations=[],
disk_info={}, image={}, instance=instance)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.finish_resize.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.finish_resize.end')
self.assertEqual(msg.priority, 'INFO')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance.uuid)
self.assertEqual(payload['instance_type'], 'm1.small')
self.assertEqual(str(payload['instance_type_id']), str(new_type_id))
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_resize_instance_notification(self):
# Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
instance = self._create_fake_instance_obj()
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
timeutils.set_time_override(cur_time)
fake_notifier.NOTIFICATIONS = []
instance.host = 'foo'
instance.task_state = task_states.RESIZE_PREP
instance.save()
instance_type = flavors.get_default_flavor()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={}, reservations=[],
request_spec={}, filter_properties={}, node=None)
db.migration_get_by_instance_and_status(self.context.elevated(),
instance.uuid,
'pre-migrating')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.exists')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.resize.prep.start')
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEqual(msg.event_type,
'compute.instance.resize.prep.end')
self.assertEqual(msg.priority, 'INFO')
payload = msg.payload
self.assertEqual(payload['tenant_id'], self.project_id)
self.assertEqual(payload['user_id'], self.user_id)
self.assertEqual(payload['instance_id'], instance.uuid)
self.assertEqual(payload['instance_type'], 'm1.tiny')
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
self.assertEqual(str(payload['instance_type_id']), str(type_id))
flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
self.assertIn('display_name', payload)
self.assertIn('created_at', payload)
self.assertIn('launched_at', payload)
image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
self.assertEqual(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_prep_resize_instance_migration_error_on_same_host(self):
"""Ensure prep_resize raise a migration error if destination is set on
the same source host and allow_resize_to_same_host is false
"""
self.flags(host="foo", allow_resize_to_same_host=False)
instance = self._create_fake_instance_obj()
reservations = self._ensure_quota_reservations_rolledback(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.host = self.compute.host
instance.save()
instance_type = flavors.get_default_flavor()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations, request_spec={},
filter_properties={}, node=None)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_prep_resize_instance_migration_error_on_none_host(self):
"""Ensure prep_resize raises a migration error if destination host is
not defined
"""
instance = self._create_fake_instance_obj()
reservations = self._ensure_quota_reservations_rolledback(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.host = None
instance.save()
instance_type = flavors.get_default_flavor()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations, request_spec={},
filter_properties={}, node=None)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_resize_instance_driver_error(self):
        # Ensure the instance's vm_state is set to ERROR on a resize error.
def throw_up(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
throw_up)
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
reservations = self._ensure_quota_reservations_rolledback(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations, request_spec={},
filter_properties={}, node=None)
instance.task_state = task_states.RESIZE_PREP
instance.save()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
        # verify the driver failure propagates and the instance ends up in
        # ERROR
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(instance.vm_state, vm_states.ERROR)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_resize_instance_driver_rollback(self):
        # Ensure the instance's vm_state is restored to ACTIVE after the
        # rollback.
def throw_up(*args, **kwargs):
raise exception.InstanceFaultRollback(test.TestingException())
self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
throw_up)
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
reservations = self._ensure_quota_reservations_rolledback(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations, request_spec={},
filter_properties={}, node=None)
instance.task_state = task_states.RESIZE_PREP
instance.save()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertIsNone(instance.task_state)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_resize_instance(self):
# Ensure instance can be migrated/resized.
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type, image={}, reservations=[],
request_spec={}, filter_properties={}, node=None)
# verify 'old_vm_state' was set on system_metadata
instance.refresh()
sys_meta = instance.system_metadata
self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
self._stub_out_resize_network_methods()
instance.task_state = task_states.RESIZE_PREP
instance.save()
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
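        # Patch out the BDM lookup and volume connection teardown so we can
        # verify resize_instance passes the fetched BDMs through to them.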
with contextlib.nested(
mock.patch.object(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid', return_value='fake_bdms'),
mock.patch.object(
self.compute, '_get_instance_volume_block_device_info',
return_value='fake_bdinfo'),
mock.patch.object(self.compute, '_terminate_volume_connections')
) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo,
mock_terminate_vol_conn):
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={}, reservations=[],
instance_type=jsonutils.to_primitive(instance_type))
mock_get_instance_vol_bdinfo.assert_called_once_with(
self.context, instance, bdms='fake_bdms')
mock_terminate_vol_conn.assert_called_once_with(self.context,
instance, 'fake_bdms')
self.assertEqual(migration.dest_compute, instance.host)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def _test_confirm_resize(self, power_on):
# Common test case method for confirm_resize
def fake(*args, **kwargs):
pass
def fake_confirm_migration_driver(*args, **kwargs):
            # Confirm the instance is still using the new flavor when the
            # driver's confirm_migration runs.
inst = args[1]
sys_meta = inst['system_metadata']
self.assertEqual(sys_meta['instance_type_flavorid'], '3')
old_vm_state = None
p_state = None
if power_on:
old_vm_state = vm_states.ACTIVE
p_state = power_state.RUNNING
else:
old_vm_state = vm_states.STOPPED
p_state = power_state.SHUTDOWN
params = {'vm_state': old_vm_state, 'power_state': p_state}
instance = self._create_fake_instance_obj(params)
self.flags(allow_resize_to_same_host=True)
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'confirm_migration',
fake_confirm_migration_driver)
self._stub_out_resize_network_methods()
reservations = self._ensure_quota_reservations_committed(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
# Confirm the instance size before the resize starts
instance.refresh()
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '1')
instance.vm_state = old_vm_state
instance.power_state = p_state
instance.save()
new_instance_type_ref = db.flavor_get_by_flavor_id(
self.context, 3)
new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_p,
image={}, reservations=reservations, request_spec={},
filter_properties={}, node=None)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
# NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
sys_meta = instance.system_metadata
self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
reservations=[],
instance_type=new_instance_type_p)
self.compute.finish_resize(self.context,
migration=migration, reservations=[],
disk_info={}, image={}, instance=instance)
# Prove that the instance size is now the new size
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '3')
# Finally, confirm the resize and verify the new flavor is applied
instance.task_state = None
instance.save()
self.compute.confirm_resize(self.context, instance=instance,
reservations=reservations,
migration=migration)
instance.refresh()
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '3')
self.assertEqual('fake-mini', migration.source_compute)
self.assertEqual(old_vm_state, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertEqual(p_state, instance.power_state)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_confirm_resize_from_active(self):
self._test_confirm_resize(power_on=True)
def test_confirm_resize_from_stopped(self):
self._test_confirm_resize(power_on=False)
def _test_finish_revert_resize(self, power_on,
remove_old_vm_state=False):
"""Convenience method that does most of the work for the
test_finish_revert_resize tests.
        :param power_on: True if testing resize from ACTIVE state, False if
            testing resize from STOPPED state.
        :param remove_old_vm_state: True if testing a case where the
            'old_vm_state' system_metadata is not present when the
            finish_revert_resize method is called.
"""
def fake(*args, **kwargs):
pass
def fake_finish_revert_migration_driver(*args, **kwargs):
# Confirm the instance uses the old type in finish_revert_resize
inst = args[1]
sys_meta = inst.system_metadata
self.assertEqual(sys_meta['instance_type_flavorid'], '1')
old_vm_state = None
if power_on:
old_vm_state = vm_states.ACTIVE
else:
old_vm_state = vm_states.STOPPED
params = {'vm_state': old_vm_state}
instance = self._create_fake_instance_obj(params)
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'finish_revert_migration',
fake_finish_revert_migration_driver)
self._stub_out_resize_network_methods()
reservations = self._ensure_quota_reservations_committed(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.refresh()
instance_type_ref = db.flavor_get(self.context,
instance.instance_type_id)
self.assertEqual(instance_type_ref['flavorid'], '1')
old_vm_state = instance['vm_state']
instance.host = 'foo'
instance.vm_state = old_vm_state
instance.save()
new_instance_type_ref = db.flavor_get_by_flavor_id(
self.context, 3)
new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
self.compute.prep_resize(self.context,
instance=instance,
instance_type=new_instance_type_p,
image={}, reservations=reservations, request_spec={},
filter_properties={}, node=None)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
# NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
sys_meta = instance.system_metadata
self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
reservations=[],
instance_type=new_instance_type_p)
self.compute.finish_resize(self.context,
migration=migration, reservations=[],
disk_info={}, image={}, instance=instance)
# Prove that the instance size is now the new size
instance_type_ref = db.flavor_get(self.context,
instance['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '3')
instance.task_state = task_states.RESIZE_REVERTING
instance.save()
self.compute.revert_resize(self.context,
migration=migration, instance=instance,
reservations=reservations)
instance.refresh()
if remove_old_vm_state:
# need to wipe out the old_vm_state from system_metadata
# before calling finish_revert_resize
sys_meta = instance.system_metadata
sys_meta.pop('old_vm_state')
# Have to reset for save() to work
instance.system_metadata = sys_meta
instance.save()
self.compute.finish_revert_resize(self.context,
migration=migration,
instance=instance, reservations=reservations)
self.assertIsNone(instance.task_state)
instance_type_ref = db.flavor_get(self.context,
instance['instance_type_id'])
self.assertEqual(instance_type_ref['flavorid'], '1')
self.assertEqual(instance.host, migration.source_compute)
if remove_old_vm_state:
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
else:
self.assertEqual(old_vm_state, instance.vm_state)
def test_finish_revert_resize_from_active(self):
self._test_finish_revert_resize(power_on=True)
def test_finish_revert_resize_from_stopped(self):
self._test_finish_revert_resize(power_on=False)
def test_finish_revert_resize_from_stopped_remove_old_vm_state(self):
# in this case we resize from STOPPED but end up with ACTIVE
# because the old_vm_state value is not present in
# finish_revert_resize
self._test_finish_revert_resize(power_on=False,
remove_old_vm_state=True)
def _test_cleanup_stored_instance_types(self, old, new, revert=False):
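        # Stub the flavors helpers and verify which flavor information
        # _cleanup_stored_instance_types returns for the confirm path
        # (revert=False) versus the revert path (revert=True).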
instance = self._create_fake_instance_obj()
migration = dict(old_instance_type_id=old,
new_instance_type_id=new)
instance.system_metadata = dict(instance_type_id=old)
sys_meta = dict(instance.system_metadata)
self.mox.StubOutWithMock(flavors, 'extract_flavor')
self.mox.StubOutWithMock(flavors, 'delete_flavor_info')
self.mox.StubOutWithMock(flavors, 'save_flavor_info')
if revert:
flavors.extract_flavor(instance, 'old_').AndReturn(
{'instance_type_id': old})
flavors.save_flavor_info(
sys_meta, {'instance_type_id': old}).AndReturn(sys_meta)
else:
flavors.extract_flavor(instance).AndReturn(
{'instance_type_id': new})
flavors.delete_flavor_info(
sys_meta, 'old_').AndReturn(sys_meta)
flavors.delete_flavor_info(
sys_meta, 'new_').AndReturn(sys_meta)
self.mox.ReplayAll()
res = self.compute._cleanup_stored_instance_types(migration, instance,
revert)
self.assertEqual(res,
(sys_meta,
{'instance_type_id': revert and old or new}))
def test_cleanup_stored_instance_types_for_resize(self):
self._test_cleanup_stored_instance_types('1', '2')
def test_cleanup_stored_instance_types_for_resize_with_update(self):
self._test_cleanup_stored_instance_types('1', '2', True)
def test_cleanup_stored_instance_types_for_migration(self):
self._test_cleanup_stored_instance_types('1', '1')
def test_cleanup_stored_instance_types_for_migration_with_update(self):
self._test_cleanup_stored_instance_types('1', '1', True)
def test_get_by_flavor_id(self):
        flavor = flavors.get_flavor_by_flavor_id(1)
        self.assertEqual(flavor['name'], 'm1.tiny')
def test_resize_same_source_fails(self):
"""Ensure instance fails to migrate when source and destination are
the same host.
"""
instance = self._create_fake_instance_obj()
reservations = self._ensure_quota_reservations_rolledback(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.refresh()
instance_type = flavors.get_default_flavor()
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance=instance,
instance_type=instance_type, image={},
reservations=reservations, request_spec={},
filter_properties={}, node=None)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_resize_instance_handles_migration_error(self):
# Ensure vm_state is ERROR when error occurs.
def raise_migration_failure(*args):
raise test.TestingException()
self.stubs.Set(self.compute.driver,
'migrate_disk_and_power_off',
raise_migration_failure)
instance = self._create_fake_instance_obj()
reservations = self._ensure_quota_reservations_rolledback(instance)
instance_type = flavors.get_default_flavor()
instance_p = obj_base.obj_to_primitive(instance)
self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
None, True, None, False)
instance.host = 'foo'
instance.save()
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={}, reservations=reservations,
request_spec={}, filter_properties={},
node=None)
migration = migration_obj.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
reservations=reservations,
instance_type=jsonutils.to_primitive(instance_type))
# NOTE(comstud): error path doesn't use objects, so our object
# is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(instance.vm_state, vm_states.ERROR)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def test_pre_live_migration_instance_has_no_fixed_ip(self):
        # Confirm that no exception is raised when the instance has no
        # fixed IP at pre_live_migration time.
instance = self._create_fake_instance_obj()
c = context.get_admin_context()
self.mox.ReplayAll()
self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
{'block_device_mapping': []},
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
def test_pre_live_migration_works_correctly(self):
        # Confirm pre_live_migration() sets up networking and filtering
        # rules and emits the expected start/end notifications.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self.stubs)
self.stubs.Set(nova.compute.manager.ComputeManager,
'_get_instance_nw_info', stupid)
# creating instance testdata
instance = self._create_fake_instance_obj({'host': 'dummy'})
c = context.get_admin_context()
nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
{'block_device_mapping': []},
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.StubOutWithMock(self.compute.driver,
'ensure_filtering_rules_for_instance')
self.compute.driver.ensure_filtering_rules_for_instance(
mox.IsA(instance), nw_info)
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, instance,
self.compute.host)
fake_notifier.NOTIFICATIONS = []
# start test
self.mox.ReplayAll()
migrate_data = {'is_shared_storage': False}
ret = self.compute.pre_live_migration(c, instance=instance,
block_migration=False, disk=None,
migrate_data=migrate_data)
self.assertIsNone(ret)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.pre.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.pre.end')
# cleanup
db.instance_destroy(c, instance['uuid'])
def test_live_migration_exception_rolls_back(self):
# Confirm exception when pre_live_migration fails.
c = context.get_admin_context()
src_host = 'fake-src-host'
instance = dict(uuid='fake_instance', host=src_host,
name='fake-name')
updated_instance = self._create_fake_instance_obj(
{'host': 'fake-dest-host'})
dest_host = updated_instance['host']
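        # Two volume-backed BDMs so the rollback path has a volume
        # connection to remove on the destination for each of them.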
fake_bdms = [
block_device_obj.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'vol1-id', 'source_type': 'volume',
'destination_type': 'volume'})),
block_device_obj.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'vol2-id', 'source_type': 'volume',
'destination_type': 'volume'}))
]
# creating mocks
self.mox.StubOutWithMock(self.compute.driver,
'get_instance_disk_info')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'remove_volume_connection')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'rollback_live_migration_at_destination')
self.compute.driver.get_instance_disk_info(
instance['name']).AndReturn('fake_disk')
self.compute.compute_rpcapi.pre_live_migration(c,
instance, True, 'fake_disk', dest_host,
{}).AndRaise(test.TestingException())
self.compute._instance_update(c, instance['uuid'],
host=src_host, vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.MIGRATING).AndReturn(
updated_instance)
self.compute.network_api.setup_networks_on_host(c,
updated_instance, self.compute.host)
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(c,
updated_instance['uuid']).AndReturn(fake_bdms)
self.compute.compute_rpcapi.remove_volume_connection(
c, updated_instance, 'vol1-id', dest_host)
self.compute.compute_rpcapi.remove_volume_connection(
c, updated_instance, 'vol2-id', dest_host)
self.compute.compute_rpcapi.rollback_live_migration_at_destination(
c, updated_instance, dest_host)
# start test
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute.live_migration,
c, dest=dest_host, block_migration=True,
instance=instance, migrate_data={})
def test_live_migration_works_correctly(self):
        # Confirm live_migration() works as expected.
# creating instance testdata
c = context.get_admin_context()
instance = self._objectify(self._create_fake_instance_obj())
instance.host = self.compute.host
dest = 'desthost'
migrate_data = {'is_shared_storage': False}
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
self.compute.compute_rpcapi.pre_live_migration(
c, instance, False, None, dest, migrate_data)
self.mox.StubOutWithMock(self.compute.network_api,
'migrate_instance_start')
migration = {'source_compute': instance['host'], 'dest_compute': dest}
self.compute.network_api.migrate_instance_start(c, instance,
migration)
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'post_live_migration_at_destination')
self.compute.compute_rpcapi.post_live_migration_at_destination(
c, instance, False, dest)
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, instance,
instance['host'],
teardown=True)
self.mox.StubOutWithMock(self.compute.instance_events,
'clear_events_for_instance')
self.compute.instance_events.clear_events_for_instance(
mox.IgnoreArg())
# start test
self.mox.ReplayAll()
ret = self.compute.live_migration(c, dest=dest,
instance=instance,
block_migration=False,
migrate_data=migrate_data)
self.assertIsNone(ret)
# cleanup
instance.destroy(c)
def test_post_live_migration_no_shared_storage_working_correctly(self):
"""Confirm post_live_migration() works correctly as expected
for non shared storage migration.
"""
# Create stubs
result = {}
        # A non-shared storage live migration does not need to destroy the
        # instance on the source server, since it has been migrated to the
        # destination, but block device and network cleanup is still needed.
def fakecleanup(*args, **kwargs):
result['cleanup'] = True
self.stubs.Set(self.compute.driver, 'cleanup', fakecleanup)
dest = 'desthost'
srchost = self.compute.host
# creating testdata
c = context.get_admin_context()
inst_ref = jsonutils.to_primitive(self._create_fake_instance({
'host': srchost,
'state_description': 'migrating',
'state': power_state.PAUSED}))
inst_uuid = inst_ref['uuid']
db.instance_update(c, inst_uuid,
{'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
self.mox.StubOutWithMock(self.compute.network_api,
'migrate_instance_start')
migration = {'source_compute': srchost, 'dest_compute': dest, }
self.compute.network_api.migrate_instance_start(c, inst_ref,
migration)
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'post_live_migration_at_destination')
self.compute.compute_rpcapi.post_live_migration_at_destination(
c, inst_ref, False, dest)
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, inst_ref,
self.compute.host,
teardown=True)
self.mox.StubOutWithMock(self.compute.instance_events,
'clear_events_for_instance')
self.compute.instance_events.clear_events_for_instance(
mox.IgnoreArg())
# start test
self.mox.ReplayAll()
migrate_data = {'is_shared_storage': False}
self.compute._post_live_migration(c, inst_ref, dest,
migrate_data=migrate_data)
self.assertIn('cleanup', result)
self.assertEqual(result['cleanup'], True)
def test_post_live_migration_working_correctly(self):
        # Confirm post_live_migration() works as expected.
dest = 'desthost'
srchost = self.compute.host
# creating testdata
c = context.get_admin_context()
instance = self._objectify(self._create_fake_instance({
'host': srchost,
'state_description': 'migrating',
'state': power_state.PAUSED}))
instance.update({'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
instance.save(c)
# creating mocks
with contextlib.nested(
mock.patch.object(self.compute.driver, 'post_live_migration'),
mock.patch.object(self.compute.driver, 'unfilter_instance'),
mock.patch.object(self.compute.network_api,
'migrate_instance_start'),
mock.patch.object(self.compute.compute_rpcapi,
'post_live_migration_at_destination'),
mock.patch.object(self.compute.driver, 'unplug_vifs'),
mock.patch.object(self.compute.network_api,
'setup_networks_on_host'),
mock.patch.object(self.compute.instance_events,
'clear_events_for_instance')
) as (
post_live_migration, unfilter_instance,
migrate_instance_start, post_live_migration_at_destination,
unplug_vifs, setup_networks_on_host, clear_events
):
self.compute._post_live_migration(c, instance, dest)
post_live_migration.assert_has_calls([
mock.call(c, instance, {'block_device_mapping': []}, None)])
unfilter_instance.assert_has_calls([mock.call(instance, [])])
migration = {'source_compute': srchost,
'dest_compute': dest, }
migrate_instance_start.assert_has_calls([
mock.call(c, instance, migration)])
post_live_migration_at_destination.assert_has_calls([
mock.call(c, instance, False, dest)])
unplug_vifs.assert_has_calls([mock.call(instance, [])])
setup_networks_on_host.assert_has_calls([
mock.call(c, instance, self.compute.host, teardown=True)])
clear_events.assert_called_once_with(instance)
def test_post_live_migration_terminate_volume_connections(self):
c = context.get_admin_context()
instance = self._objectify(self._create_fake_instance({
'host': self.compute.host,
'state_description': 'migrating',
'state': power_state.PAUSED}))
instance.update({'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
instance.save(c)
bdms = block_device_obj.block_device_make_list(c,
[fake_block_device.FakeDbBlockDeviceDict({
'source_type': 'blank', 'guest_format': None,
'destination_type': 'local'}),
fake_block_device.FakeDbBlockDeviceDict({
'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'fake-volume-id'}),
])
with contextlib.nested(
mock.patch.object(self.compute.network_api,
'migrate_instance_start'),
mock.patch.object(self.compute.compute_rpcapi,
'post_live_migration_at_destination'),
mock.patch.object(self.compute.network_api,
'setup_networks_on_host'),
mock.patch.object(self.compute.instance_events,
'clear_events_for_instance'),
mock.patch.object(self.compute,
'_get_instance_volume_block_device_info'),
mock.patch.object(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid'),
mock.patch.object(self.compute.driver, 'get_volume_connector'),
mock.patch.object(cinder.API, 'terminate_connection')
) as (
migrate_instance_start, post_live_migration_at_destination,
setup_networks_on_host, clear_events_for_instance,
get_instance_volume_block_device_info, get_by_instance_uuid,
get_volume_connector, terminate_connection
):
get_by_instance_uuid.return_value = bdms
get_volume_connector.return_value = 'fake-connector'
self.compute._post_live_migration(c, instance, 'dest_host')
terminate_connection.assert_called_once_with(
c, 'fake-volume-id', 'fake-connector')
def _begin_post_live_migration_at_destination(self):
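# Shared setup: create a migrating instance and record the network and
# driver calls expected from post_live_migration_at_destination().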
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.mox.StubOutWithMock(self.compute.network_api,
'migrate_instance_finish')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute, '_get_compute_info')
params = {'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED, }
self.instance = jsonutils.to_primitive(
self._create_fake_instance(params))
self.admin_ctxt = context.get_admin_context()
self.instance = instance_obj.Instance._from_db_object(self.context,
instance_obj.Instance(),
db.instance_get_by_uuid(self.admin_ctxt, self.instance['uuid']))
self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
self.instance,
self.compute.host)
migration = {'source_compute': self.instance['host'],
'dest_compute': self.compute.host, }
self.compute.network_api.migrate_instance_finish(
self.admin_ctxt, self.instance, migration)
fake_net_info = []
fake_block_dev_info = {'foo': 'bar'}
self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
self.instance,
fake_net_info,
False,
fake_block_dev_info)
self.compute._get_power_state(self.admin_ctxt,
self.instance).AndReturn(10001)
def _finish_post_live_migration_at_destination(self):
self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
mox.IgnoreArg(), self.compute.host)
fake_notifier.NOTIFICATIONS = []
self.mox.ReplayAll()
self.compute.post_live_migration_at_destination(self.admin_ctxt,
self.instance, False)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.post.dest.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.post.dest.end')
return self.compute.conductor_api.instance_get_by_uuid(self.admin_ctxt,
self.instance['uuid'])
def test_post_live_migration_at_destination_with_compute_info(self):
"""The instance's node property should be updated correctly."""
self._begin_post_live_migration_at_destination()
hypervisor_hostname = 'fake_hypervisor_hostname'
fake_compute_info = compute_node_obj.ComputeNode(
hypervisor_hostname=hypervisor_hostname)
self.compute._get_compute_info(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
fake_compute_info)
updated = self._finish_post_live_migration_at_destination()
self.assertEqual(updated['node'], hypervisor_hostname)
def test_post_live_migration_at_destination_without_compute_info(self):
"""The instance's node property should be set to None if we fail to
get compute_info.
"""
self._begin_post_live_migration_at_destination()
self.compute._get_compute_info(mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(
exception.NotFound())
updated = self._finish_post_live_migration_at_destination()
self.assertIsNone(updated['node'])
def test_rollback_live_migration_at_destination_correctly(self):
# creating instance testdata
c = context.get_admin_context()
instance_ref = self._create_fake_instance({'host': 'dummy'})
inst_uuid = instance_ref['uuid']
inst_id = instance_ref['id']
instance = jsonutils.to_primitive(db.instance_get(c, inst_id))
fake_notifier.NOTIFICATIONS = []
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
self.compute.network_api.setup_networks_on_host(c, instance,
self.compute.host,
teardown=True)
self.mox.StubOutWithMock(self.compute.driver,
'rollback_live_migration_at_destination')
self.compute.driver.rollback_live_migration_at_destination(c,
instance, [], {'block_device_mapping': []})
# start test
self.mox.ReplayAll()
ret = self.compute.rollback_live_migration_at_destination(c,
instance=instance)
self.assertIsNone(ret)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.rollback.dest.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'compute.instance.live_migration.rollback.dest.end')
# cleanup
db.instance_destroy(c, inst_uuid)
def test_run_kill_vm(self):
# Detect when a vm is terminated behind the scenes.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
instances = db.instance_get_all(self.context)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
instance_name = instances[0]['name']
self.compute.driver.test_remove_vm(instance_name)
# Force the compute manager to do its periodic poll
ctxt = context.get_admin_context()
self.compute._sync_power_states(ctxt)
instances = db.instance_get_all(self.context)
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.assertIsNone(instances[0]['task_state'])
def test_add_instance_fault(self):
instance = self._create_fake_instance()
exc_info = None
def fake_db_fault_create(ctxt, values):
self.assertIn('raise NotImplementedError', values['details'])
del values['details']
expected = {
'code': 500,
'message': 'test',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEqual(expected, values)
try:
raise NotImplementedError('test')
except NotImplementedError:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api,
instance,
NotImplementedError('test'),
exc_info)
def test_add_instance_fault_with_remote_error(self):
instance = self._create_fake_instance()
exc_info = None
def fake_db_fault_create(ctxt, values):
self.assertIn('raise messaging.RemoteError', values['details'])
del values['details']
expected = {
'code': 500,
'instance_uuid': instance['uuid'],
'message': 'Remote error: test My Test Message\nNone.',
'host': self.compute.host
}
self.assertEqual(expected, values)
try:
raise messaging.RemoteError('test', 'My Test Message')
except messaging.RemoteError as exc:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api, instance, exc, exc_info)
def test_add_instance_fault_user_error(self):
instance = self._create_fake_instance()
exc_info = None
def fake_db_fault_create(ctxt, values):
expected = {
'code': 400,
'message': 'fake details',
'details': '',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEqual(expected, values)
user_exc = exception.Invalid('fake details', code=400)
try:
raise user_exc
except exception.Invalid:
exc_info = sys.exc_info()
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api, instance, user_exc, exc_info)
def test_add_instance_fault_no_exc_info(self):
instance = self._create_fake_instance()
def fake_db_fault_create(ctxt, values):
expected = {
'code': 500,
'message': 'test',
'details': '',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEqual(expected, values)
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api,
instance,
NotImplementedError('test'))
def test_add_instance_fault_long_message(self):
instance = self._create_fake_instance()
message = 300 * 'a'
def fake_db_fault_create(ctxt, values):
expected = {
'code': 500,
'message': message[:255],
'details': '',
'instance_uuid': instance['uuid'],
'host': self.compute.host
}
self.assertEqual(expected, values)
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
self.compute.conductor_api,
instance,
NotImplementedError(message))
def _test_cleanup_running(self, action):
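# Helper: create two instances deleted longer ago than the timeout,
# stub the driver query to return them, and set the cleanup action.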
admin_context = context.get_admin_context()
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance1 = self._create_fake_instance({"deleted_at": deleted_at,
"deleted": True})
instance2 = self._create_fake_instance({"deleted_at": deleted_at,
"deleted": True})
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
self.compute._get_instances_on_driver(
admin_context, {'deleted': True,
'soft_deleted': False,
'host': self.compute.host}).AndReturn([instance1,
instance2])
self.flags(running_deleted_instance_timeout=3600,
running_deleted_instance_action=action)
return admin_context, instance1, instance2
def test_cleanup_running_deleted_instances_unrecognized_value(self):
admin_context = context.get_admin_context()
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance = self._create_fake_instance({"deleted_at": deleted_at,
"deleted": True})
self.flags(running_deleted_instance_action='foo-action')
with mock.patch.object(
self.compute, '_get_instances_on_driver',
return_value=[instance]) as _get_instances_on_driver:
try:
# We cannot simply use assertRaises here because the exception
# raised is the generic Exception; to be sure it is the expected
# one, we check the message instead.
self.compute._cleanup_running_deleted_instances(admin_context)
self.fail("Expected _cleanup_running_deleted_instances to raise.")
except Exception as e:
self.assertIn("Unrecognized value", six.text_type(e))
def test_cleanup_running_deleted_instances_reap(self):
ctxt, inst1, inst2 = self._test_cleanup_running('reap')
bdms = block_device_obj.block_device_make_list(ctxt, [])
self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
"get_by_instance_uuid")
# Simulate an error and make sure cleanup proceeds with next instance.
self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
AndRaise(test.TestingException)
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
inst1.uuid, use_slave=True).AndReturn(bdms)
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
inst2.uuid, use_slave=True).AndReturn(bdms)
self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
AndReturn(None)
self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
self.compute._cleanup_volumes(ctxt, inst1['uuid'], bdms).\
AndReturn(None)
self.mox.ReplayAll()
self.compute._cleanup_running_deleted_instances(ctxt)
def test_cleanup_running_deleted_instances_shutdown(self):
ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.compute.driver.set_bootable(inst1, False)
self.compute.driver.power_off(inst1)
self.compute.driver.set_bootable(inst2, False)
self.compute.driver.power_off(inst2)
self.mox.ReplayAll()
self.compute._cleanup_running_deleted_instances(ctxt)
def test_cleanup_running_deleted_instances_shutdown_notimpl(self):
ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.compute.driver.set_bootable(inst1, False).AndRaise(
NotImplementedError)
compute_manager.LOG.warn(mox.IgnoreArg())
self.compute.driver.power_off(inst1)
self.compute.driver.set_bootable(inst2, False).AndRaise(
NotImplementedError)
compute_manager.LOG.warn(mox.IgnoreArg())
self.compute.driver.power_off(inst2)
self.mox.ReplayAll()
self.compute._cleanup_running_deleted_instances(ctxt)
def test_cleanup_running_deleted_instances_shutdown_error(self):
ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(compute_manager.LOG, 'exception')
e = test.TestingException('bad')
self.compute.driver.set_bootable(inst1, False)
self.compute.driver.power_off(inst1).AndRaise(e)
compute_manager.LOG.warn(mox.IgnoreArg())
self.compute.driver.set_bootable(inst2, False)
self.compute.driver.power_off(inst2).AndRaise(e)
compute_manager.LOG.warn(mox.IgnoreArg())
self.mox.ReplayAll()
self.compute._cleanup_running_deleted_instances(ctxt)
def test_running_deleted_instances(self):
admin_context = context.get_admin_context()
self.compute.host = 'host'
instance1 = {}
instance1['deleted'] = True
instance1['deleted_at'] = "sometimeago"
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
self.compute._get_instances_on_driver(
admin_context, {'deleted': True,
'soft_deleted': False,
'host': self.compute.host}).AndReturn([instance1])
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
CONF.running_deleted_instance_timeout).AndReturn(True)
self.mox.ReplayAll()
val = self.compute._running_deleted_instances(admin_context)
self.assertEqual(val, [instance1])
def test_get_instance_nw_info(self):
fake_network.unset_stub_network_methods(self.stubs)
fake_inst = fake_instance.fake_db_instance(uuid='fake-instance')
fake_nw_info = network_model.NetworkInfo()
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_inst['uuid']
).AndReturn(fake_inst)
# NOTE(danms): compute manager will re-query since we're not giving
# it an instance with system_metadata. We're stubbing out the
# subsequent call so we don't need it, but keep this to make sure it
# does the right thing.
db.instance_get_by_uuid(self.context, fake_inst['uuid'],
columns_to_join=['info_cache',
'security_groups'],
use_slave=False
).AndReturn(fake_inst)
self.compute.network_api.get_instance_nw_info(self.context,
mox.IsA(instance_obj.Instance)).AndReturn(fake_nw_info)
self.mox.ReplayAll()
fake_inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), fake_inst, [])
result = self.compute._get_instance_nw_info(self.context,
fake_inst_obj)
self.assertEqual(fake_nw_info, result)
def test_heal_instance_info_cache(self):
# Update on every call for the test
self.flags(heal_instance_info_cache_interval=-1)
ctxt = context.get_admin_context()
instance_map = {}
instances = []
for x in xrange(8):
inst_uuid = 'fake-uuid-%s' % x
instance_map[inst_uuid] = fake_instance.fake_db_instance(
uuid=inst_uuid, host=CONF.host, created_at=None)
# These won't be in our instance since they're not requested
instances.append(instance_map[inst_uuid])
call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
'get_nw_info': 0, 'expected_instance': None}
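# The counters record how many times each stub is hit so every
# _heal_instance_info_cache() pass can be asserted incrementally.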
def fake_instance_get_all_by_host(context, host,
columns_to_join, use_slave=False):
call_info['get_all_by_host'] += 1
self.assertEqual([], columns_to_join)
return instances[:]
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join, use_slave=False):
if instance_uuid not in instance_map:
raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
self.assertEqual(['system_metadata', 'info_cache'],
columns_to_join)
return instance_map[instance_uuid]
# NOTE(comstud): Override the stub in setUp()
def fake_get_instance_nw_info(context, instance, use_slave=False):
# Note that this exception gets caught in compute/manager
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
# failure checking it below.
self.assertEqual(call_info['expected_instance']['uuid'],
instance['uuid'])
call_info['get_nw_info'] += 1
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(self.compute, '_get_instance_nw_info',
fake_get_instance_nw_info)
# Make an instance appear to be still Building
instances[0]['vm_state'] = vm_states.BUILDING
# Make an instance appear to be Deleting
instances[1]['task_state'] = task_states.DELETING
# '0', '1' should be skipped..
call_info['expected_instance'] = instances[2]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(1, call_info['get_all_by_host'])
self.assertEqual(0, call_info['get_by_uuid'])
self.assertEqual(1, call_info['get_nw_info'])
call_info['expected_instance'] = instances[3]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(1, call_info['get_all_by_host'])
self.assertEqual(1, call_info['get_by_uuid'])
self.assertEqual(2, call_info['get_nw_info'])
# Make an instance switch hosts
instances[4]['host'] = 'not-me'
# Make an instance disappear
instance_map.pop(instances[5]['uuid'])
# Make an instance switch to be Deleting
instances[6]['task_state'] = task_states.DELETING
# '4', '5', and '6' should be skipped..
call_info['expected_instance'] = instances[7]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(1, call_info['get_all_by_host'])
self.assertEqual(4, call_info['get_by_uuid'])
self.assertEqual(3, call_info['get_nw_info'])
# Should be no more left.
self.assertEqual(0, len(self.compute._instance_uuids_to_heal))
# This should cause a DB query now, so get a list of instances
# where none can be processed to make sure we handle that case
# cleanly. Use just '0' (Building) and '1' (Deleting)
instances = instances[0:2]
self.compute._heal_instance_info_cache(ctxt)
# Should have called the list once more
self.assertEqual(2, call_info['get_all_by_host'])
# Stays the same because we remove invalid entries from the list
self.assertEqual(4, call_info['get_by_uuid'])
# Stays the same because we didn't find anything to process
self.assertEqual(3, call_info['get_nw_info'])
@mock.patch('nova.objects.instance.InstanceList.get_by_filters')
@mock.patch('nova.compute.api.API.unrescue')
def test_poll_rescued_instances(self, unrescue, get):
timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
not_timed_out_time = timeutils.utcnow()
instances = [instance_obj.Instance(uuid='fake_uuid1',
vm_state=vm_states.RESCUED,
launched_at=timed_out_time),
instance_obj.Instance(uuid='fake_uuid2',
vm_state=vm_states.RESCUED,
launched_at=timed_out_time),
instance_obj.Instance(uuid='fake_uuid3',
vm_state=vm_states.RESCUED,
launched_at=not_timed_out_time)]
unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
def fake_instance_get_all_by_filters(context, filters,
expected_attrs=None,
use_slave=False):
self.assertEqual(["system_metadata"], expected_attrs)
return instances
get.side_effect = fake_instance_get_all_by_filters
def fake_unrescue(context, instance):
unrescued_instances[instance['uuid']] = True
unrescue.side_effect = fake_unrescue
self.flags(rescue_timeout=60)
ctxt = context.get_admin_context()
self.compute._poll_rescued_instances(ctxt)
for instance in unrescued_instances.values():
self.assertTrue(instance)
def test_poll_unconfirmed_resizes(self):
instances = [
fake_instance.fake_db_instance(uuid='fake_uuid1',
vm_state=vm_states.RESIZED,
task_state=None),
fake_instance.fake_db_instance(uuid='noexist'),
fake_instance.fake_db_instance(uuid='fake_uuid2',
vm_state=vm_states.ERROR,
task_state=None),
fake_instance.fake_db_instance(uuid='fake_uuid3',
vm_state=vm_states.ACTIVE,
task_state=
task_states.REBOOTING),
fake_instance.fake_db_instance(uuid='fake_uuid4',
vm_state=vm_states.RESIZED,
task_state=None),
fake_instance.fake_db_instance(uuid='fake_uuid5',
vm_state=vm_states.ACTIVE,
task_state=None),
fake_instance.fake_db_instance(uuid='fake_uuid6',
vm_state=vm_states.RESIZED,
task_state='deleting')]
expected_migration_status = {'fake_uuid1': 'confirmed',
'noexist': 'error',
'fake_uuid2': 'error',
'fake_uuid3': 'error',
'fake_uuid4': None,
'fake_uuid5': 'error',
'fake_uuid6': 'error'}
migrations = []
for i, instance in enumerate(instances, start=1):
fake_mig = test_migration.fake_db_migration()
fake_mig.update({'id': i,
'instance_uuid': instance['uuid'],
'status': None})
migrations.append(fake_mig)
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join=None, use_slave=False):
self.assertIn('metadata', columns_to_join)
self.assertIn('system_metadata', columns_to_join)
# raise InstanceNotFound exception for uuid 'noexist'
if instance_uuid == 'noexist':
raise exception.InstanceNotFound(instance_id=instance_uuid)
for instance in instances:
if instance['uuid'] == instance_uuid:
return instance
def fake_migration_get_unconfirmed_by_dest_compute(context,
resize_confirm_window, dest_compute, use_slave=False):
self.assertEqual(dest_compute, CONF.host)
return migrations
def fake_migration_update(context, mid, updates):
for migration in migrations:
if migration['id'] == mid:
migration.update(updates)
return migration
def fake_confirm_resize(context, instance, migration=None):
# raise exception for 'fake_uuid4' to check migration status
# does not get set to 'error' on confirm_resize failure.
if instance['uuid'] == 'fake_uuid4':
raise test.TestingException('bomb')
self.assertIsNotNone(migration)
for migration2 in migrations:
if (migration2['instance_uuid'] ==
migration['instance_uuid']):
migration2['status'] = 'confirmed'
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
fake_migration_get_unconfirmed_by_dest_compute)
self.stubs.Set(db, 'migration_update', fake_migration_update)
self.stubs.Set(self.compute.compute_api, 'confirm_resize',
fake_confirm_resize)
def fetch_instance_migration_status(instance_uuid):
for migration in migrations:
if migration['instance_uuid'] == instance_uuid:
return migration['status']
self.flags(resize_confirm_window=60)
ctxt = context.get_admin_context()
self.compute._poll_unconfirmed_resizes(ctxt)
for uuid, status in expected_migration_status.iteritems():
self.assertEqual(status, fetch_instance_migration_status(uuid))
def test_instance_build_timeout_disabled(self):
# Tests that no instances are set to error state when there is no
# instance_build_timeout configured.
self.flags(instance_build_timeout=0)
ctxt = context.get_admin_context()
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
instances = []
for x in xrange(5):
instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
instance.update(filters)
instances.append(instance)
# creating mocks
with mock.patch.object(self.compute.conductor_api,
'instance_get_all_by_filters',
return_value=instances) as (
instance_get_all_by_filters
):
# run the code
self.compute._check_instance_build_time(ctxt)
# check our assertions
self.assertThat(instance_get_all_by_filters.mock_calls,
testtools_matchers.HasLength(0))
def test_instance_build_timeout_mixed_instances(self):
# Tests that instances which failed to build within the configured
# instance_build_timeout value are set to error state.
self.flags(instance_build_timeout=30)
ctxt = context.get_admin_context()
created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
# these are the ones that are expired
old_instances = []
for x in xrange(4):
instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
instance.update(filters)
old_instances.append(fake_instance.fake_db_instance(**instance))
# not expired
instances = list(old_instances) # copy the contents of old_instances
new_instance = {
'uuid': str(uuid.uuid4()),
'created_at': timeutils.utcnow(),
}
sort_key = 'created_at'
sort_dir = 'desc'
new_instance.update(filters)
instances.append(fake_instance.fake_db_instance(**new_instance))
# need something to return from conductor_api.instance_update
# that is defined outside the for loop and can be used in the mock
# context
fake_instance_ref = {'host': CONF.host, 'node': 'fake'}
# creating mocks
with contextlib.nested(
mock.patch.object(self.compute.db.sqlalchemy.api,
'instance_get_all_by_filters',
return_value=instances),
mock.patch.object(self.compute.conductor_api, 'instance_update',
return_value=fake_instance_ref),
mock.patch.object(self.compute.driver, 'node_is_available',
return_value=False)
) as (
instance_get_all_by_filters,
conductor_instance_update,
node_is_available
):
# run the code
self.compute._check_instance_build_time(ctxt)
# check our assertions
instance_get_all_by_filters.assert_called_once_with(
ctxt, filters,
sort_key,
sort_dir,
marker=None,
columns_to_join=[],
use_slave=True,
limit=None)
self.assertThat(conductor_instance_update.mock_calls,
testtools_matchers.HasLength(len(old_instances)))
self.assertThat(node_is_available.mock_calls,
testtools_matchers.HasLength(len(old_instances)))
for inst in old_instances:
conductor_instance_update.assert_has_calls([
mock.call(ctxt, inst['uuid'],
vm_state=vm_states.ERROR)])
node_is_available.assert_has_calls([
mock.call(fake_instance_ref['node'])])
def test_get_resource_tracker_fail(self):
self.assertRaises(exception.NovaException,
self.compute._get_resource_tracker,
'invalidnodename')
def test_instance_update_host_check(self):
# make sure rt usage doesn't happen if the host or node is different
def fail_get(nodename):
raise test.TestingException(_("wrong host/node"))
self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)
instance = self._create_fake_instance({'host': 'someotherhost'})
self.compute._instance_update(self.context, instance['uuid'])
instance = self._create_fake_instance({'node': 'someothernode'})
self.compute._instance_update(self.context, instance['uuid'])
params = {'host': 'someotherhost', 'node': 'someothernode'}
instance = self._create_fake_instance(params)
self.compute._instance_update(self.context, instance['uuid'])
def test_destroy_evacuated_instance_on_shared_storage(self):
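# Only the evacuated instance should be destroyed, and its disks kept
# (the final destroy() argument is False) because storage is shared.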
fake_context = context.get_admin_context()
# instances in central db
instances = [
# those are still related to this host
self._create_fake_instance_obj(
{'host': self.compute.host}),
self._create_fake_instance_obj(
{'host': self.compute.host}),
self._create_fake_instance_obj(
{'host': self.compute.host})
]
# this one has already been evacuated to another host
evacuated_instance = self._create_fake_instance_obj(
{'host': 'otherhost'})
instances.append(evacuated_instance)
self.mox.StubOutWithMock(self.compute,
'_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute,
'_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute,
'_is_instance_storage_shared')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.compute._get_instances_on_driver(
fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
self.compute._get_instance_volume_block_device_info(
fake_context, evacuated_instance).AndReturn('fake_bdi')
self.compute._is_instance_storage_shared(fake_context,
evacuated_instance).AndReturn(True)
self.compute.driver.destroy(fake_context, evacuated_instance,
'fake_network_info',
'fake_bdi',
False)
self.mox.ReplayAll()
self.compute._destroy_evacuated_instances(fake_context)
def test_destroy_evacuated_instance_with_disks(self):
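# The remote shared-storage check returns False here, so the evacuated
# instance is destroyed along with its disks (final destroy() arg True).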
fake_context = context.get_admin_context()
# instances in central db
instances = [
# those are still related to this host
self._create_fake_instance_obj(
{'host': self.compute.host}),
self._create_fake_instance(
{'host': self.compute.host}),
self._create_fake_instance(
{'host': self.compute.host})
]
# this one has already been evacuated to another host
evacuated_instance = self._create_fake_instance_obj(
{'host': 'otherhost'})
instances.append(evacuated_instance)
self.mox.StubOutWithMock(self.compute,
'_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute,
'_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_local')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_instance_shared_storage')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_cleanup')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.compute._get_instances_on_driver(
fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
self.compute._get_instance_volume_block_device_info(
fake_context, evacuated_instance).AndReturn('fake_bdi')
self.compute.driver.check_instance_shared_storage_local(fake_context,
evacuated_instance).AndReturn({'filename': 'tmpfilename'})
self.compute.compute_rpcapi.check_instance_shared_storage(fake_context,
obj_base.obj_to_primitive(evacuated_instance),
{'filename': 'tmpfilename'}).AndReturn(False)
self.compute.driver.check_instance_shared_storage_cleanup(fake_context,
{'filename': 'tmpfilename'})
self.compute.driver.destroy(fake_context, evacuated_instance,
'fake_network_info',
'fake_bdi',
True)
self.mox.ReplayAll()
self.compute._destroy_evacuated_instances(fake_context)
def test_destroy_evacuated_instance_not_implemented(self):
fake_context = context.get_admin_context()
# instances in central db
instances = [
# those are still related to this host
self._create_fake_instance_obj(
{'host': self.compute.host}),
self._create_fake_instance_obj(
{'host': self.compute.host}),
self._create_fake_instance_obj(
{'host': self.compute.host})
]
# this one has already been evacuated to another host
evacuated_instance = self._create_fake_instance_obj(
{'host': 'otherhost'})
instances.append(evacuated_instance)
self.mox.StubOutWithMock(self.compute,
'_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute,
'_get_instance_nw_info')
self.mox.StubOutWithMock(self.compute,
'_get_instance_volume_block_device_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_local')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_instance_shared_storage')
self.mox.StubOutWithMock(self.compute.driver,
'check_instance_shared_storage_cleanup')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.compute._get_instances_on_driver(
fake_context, {'deleted': False}).AndReturn(instances)
self.compute._get_instance_nw_info(fake_context,
evacuated_instance).AndReturn(
'fake_network_info')
self.compute._get_instance_volume_block_device_info(
fake_context, evacuated_instance).AndReturn('fake_bdi')
self.compute.driver.check_instance_shared_storage_local(fake_context,
evacuated_instance).AndRaise(NotImplementedError())
self.compute.driver.destroy(fake_context, evacuated_instance,
'fake_network_info',
'fake_bdi',
True)
self.mox.ReplayAll()
self.compute._destroy_evacuated_instances(fake_context)
def test_complete_partial_deletion(self):
admin_context = context.get_admin_context()
instance = instance_obj.Instance()
instance.id = 1
instance.uuid = 'fake-uuid'
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.system_metadata = {'fake_key': 'fake_value'}
instance.vcpus = 1
instance.memory_mb = 1
instance.project_id = 'fake-prj'
instance.user_id = 'fake-user'
instance.deleted = False
def fake_destroy():
instance.deleted = True
self.stubs.Set(instance, 'destroy', fake_destroy)
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
lambda *a, **k: None)
self.stubs.Set(self.compute,
'_complete_deletion',
lambda *a, **k: None)
self.stubs.Set(quotas_obj.Quotas, 'reserve', lambda *a, **k: None)
self.compute._complete_partial_deletion(admin_context, instance)
self.assertNotEqual(0, instance.deleted)
def test_init_instance_for_partial_deletion(self):
admin_context = context.get_admin_context()
instance = instance_obj.Instance(admin_context)
instance.id = 1
instance.vm_state = vm_states.DELETED
instance.deleted = False
def fake_partial_deletion(context, instance):
instance['deleted'] = instance['id']
self.stubs.Set(self.compute,
'_complete_partial_deletion',
fake_partial_deletion)
self.compute._init_instance(admin_context, instance)
self.assertNotEqual(0, instance['deleted'])
def test_partial_deletion_raise_exception(self):
admin_context = context.get_admin_context()
instance = instance_obj.Instance(admin_context)
instance.id = 1
instance.vm_state = vm_states.DELETED
instance.deleted = False
self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion')
self.compute._complete_partial_deletion(
admin_context, instance).AndRaise(ValueError)
self.mox.ReplayAll()
self.compute._init_instance(admin_context, instance)
def test_add_remove_fixed_ip_updates_instance_updated_at(self):
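# Each fixed IP operation should bump updated_at, so the three
# timestamps gathered below must all be distinct.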
def _noop(*args, **kwargs):
pass
self.stubs.Set(self.compute.network_api,
'add_fixed_ip_to_instance', _noop)
self.stubs.Set(self.compute.network_api,
'remove_fixed_ip_from_instance', _noop)
instance = self._create_fake_instance_obj()
updated_at_1 = instance['updated_at']
self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance)
updated_at_2 = db.instance_get_by_uuid(self.context,
instance['uuid'])['updated_at']
self.compute.remove_fixed_ip_from_instance(self.context, 'fake',
self._objectify(instance))
updated_at_3 = db.instance_get_by_uuid(self.context,
instance['uuid'])['updated_at']
updated_ats = (updated_at_1, updated_at_2, updated_at_3)
self.assertEqual(len(updated_ats), len(set(updated_ats)))
def test_no_pending_deletes_for_soft_deleted_instances(self):
self.flags(reclaim_instance_interval=0)
ctxt = context.get_admin_context()
instance = self._create_fake_instance(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': timeutils.utcnow()})
self.compute._run_pending_deletes(ctxt)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertFalse(instance['cleaned'])
def test_reclaim_queued_deletes(self):
self.flags(reclaim_instance_interval=3600)
ctxt = context.get_admin_context()
# Active
self._create_fake_instance(params={'host': CONF.host})
# Deleted not old enough
self._create_fake_instance(params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': timeutils.utcnow()})
# Deleted old enough (only this one should be reclaimed)
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
self._create_fake_instance(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': deleted_at})
# Restoring
# NOTE(hanlind): This specifically tests for a race condition
# where restoring a previously soft deleted instance sets
# deleted_at back to None, causing reclaim to think it can be
# deleted, see LP #1186243.
self._create_fake_instance(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'task_state': task_states.RESTORING})
self.mox.StubOutWithMock(self.compute, '_delete_instance')
self.compute._delete_instance(
ctxt, mox.IsA(instance_obj.Instance), [],
mox.IsA(quotas_obj.Quotas))
self.mox.ReplayAll()
self.compute._reclaim_queued_deletes(ctxt)
def test_reclaim_queued_deletes_continue_on_error(self):
# Verify that reclaim continues on error.
self.flags(reclaim_instance_interval=3600)
ctxt = context.get_admin_context()
deleted_at = (timeutils.utcnow() -
datetime.timedelta(hours=1, minutes=5))
instance1 = self._create_fake_instance_obj(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': deleted_at})
instance2 = self._create_fake_instance_obj(
params={'host': CONF.host,
'vm_state': vm_states.SOFT_DELETED,
'deleted_at': deleted_at})
instances = []
instances.append(instance1)
instances.append(instance2)
self.mox.StubOutWithMock(instance_obj.InstanceList,
'get_by_filters')
self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(self.compute, '_delete_instance')
instance_obj.InstanceList.get_by_filters(
ctxt, mox.IgnoreArg(),
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
use_slave=True
).AndReturn(instances)
# The first instance delete fails.
self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance1.uuid).AndReturn([])
self.compute._delete_instance(ctxt, instance1,
[], self.none_quotas).AndRaise(
test.TestingException)
# The second instance delete that follows.
self.compute._deleted_old_enough(instance2, 3600).AndReturn(True)
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance2.uuid).AndReturn([])
self.compute._delete_instance(ctxt, instance2,
[], self.none_quotas)
self.mox.ReplayAll()
self.compute._reclaim_queued_deletes(ctxt)
def test_sync_power_states(self):
ctxt = self.context.elevated()
self._create_fake_instance({'host': self.compute.host})
self._create_fake_instance({'host': self.compute.host})
self._create_fake_instance({'host': self.compute.host})
self.mox.StubOutWithMock(self.compute.driver, 'get_info')
self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
# Check to make sure task continues on error.
self.compute.driver.get_info(mox.IgnoreArg()).AndRaise(
exception.InstanceNotFound(instance_id='fake-uuid'))
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.NOSTATE).AndRaise(
exception.InstanceNotFound(instance_id='fake-uuid'))
self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
{'state': power_state.RUNNING})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.RUNNING,
use_slave=True)
self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
{'state': power_state.SHUTDOWN})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.SHUTDOWN,
use_slave=True)
self.mox.ReplayAll()
self.compute._sync_power_states(ctxt)
def _test_lifecycle_event(self, lifecycle_event, power_state):
instance = self._create_fake_instance()
uuid = instance['uuid']
self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
if power_state is not None:
self.compute._sync_instance_power_state(
mox.IgnoreArg(),
mox.ContainsKeyValue('uuid', uuid),
power_state)
self.mox.ReplayAll()
self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event))
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_lifecycle_events(self):
self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED,
power_state.SHUTDOWN)
self._test_lifecycle_event(event.EVENT_LIFECYCLE_STARTED,
power_state.RUNNING)
self._test_lifecycle_event(event.EVENT_LIFECYCLE_PAUSED,
power_state.PAUSED)
self._test_lifecycle_event(event.EVENT_LIFECYCLE_RESUMED,
power_state.RUNNING)
self._test_lifecycle_event(-1, None)
def test_lifecycle_event_non_existent_instance(self):
# No error raised for non-existent instance because of inherent race
# between database updates and hypervisor events. See bug #1180501.
event_instance = event.LifecycleEvent('does-not-exist',
event.EVENT_LIFECYCLE_STOPPED)
self.compute.handle_events(event_instance)
def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
instance = self._create_fake_instance_obj()
old_type = flavors.extract_flavor(instance)
new_type = flavors.get_flavor_by_flavor_id('4')
sys_meta = instance.system_metadata
sys_meta = flavors.save_flavor_info(sys_meta,
old_type, 'old_')
sys_meta = flavors.save_flavor_info(sys_meta,
new_type, 'new_')
sys_meta = flavors.save_flavor_info(sys_meta,
new_type)
fake_rt = self.mox.CreateMockAnything()
def fake_drop_resize_claim(*args, **kwargs):
pass
def fake_get_resource_tracker(self):
return fake_rt
def fake_setup_networks_on_host(self, *args, **kwargs):
pass
self.stubs.Set(fake_rt, 'drop_resize_claim', fake_drop_resize_claim)
self.stubs.Set(self.compute, '_get_resource_tracker',
fake_get_resource_tracker)
self.stubs.Set(self.compute.network_api, 'setup_networks_on_host',
fake_setup_networks_on_host)
migration = migration_obj.Migration()
migration.instance_uuid = instance.uuid
migration.status = 'finished'
migration.create(self.context.elevated())
instance.task_state = task_states.DELETING
instance.vm_state = vm_states.RESIZED
instance.system_metadata = sys_meta
instance.save()
self.compute.confirm_resize(self.context, instance=instance,
migration=migration, reservations=[])
instance.refresh()
self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
def _get_instance_and_bdm_for_dev_defaults_tests(self):
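# Helper: an instance with root device /dev/vda plus a single boot
# volume BDM, shared by the _default_block_device_names tests.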
instance = self._create_fake_instance(
params={'root_device_name': '/dev/vda'})
block_device_mapping = block_device_obj.block_device_make_list(
self.context, [fake_block_device.FakeDbBlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'image_id': 'fake-image-id-1',
'boot_index': 0})])
return instance, block_device_mapping
def test_default_block_device_names_empty_instance_root_dev(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
instance['root_device_name'] = None
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
self.compute._instance_update(self.context, instance['uuid'],
root_device_name='/dev/vda')
self.compute._default_device_names_for_instance(instance,
'/dev/vda', [], [],
[bdm for bdm in bdms])
self.mox.ReplayAll()
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
def test_default_block_device_names_empty_root_device(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
bdms[0]['device_name'] = None
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping, 'save')
bdms[0].save().AndReturn(None)
self.compute._default_device_names_for_instance(instance,
'/dev/vda', [], [],
[bdm for bdm in bdms])
self.mox.ReplayAll()
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
def test_default_block_device_names_no_root_device(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
instance['root_device_name'] = None
bdms[0]['device_name'] = None
self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping, 'save')
self.mox.StubOutWithMock(self.compute,
'_default_root_device_name')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
self.compute._default_root_device_name(instance, mox.IgnoreArg(),
bdms[0]).AndReturn('/dev/vda')
self.compute._instance_update(self.context, instance['uuid'],
root_device_name='/dev/vda')
bdms[0].save().AndReturn(None)
self.compute._default_device_names_for_instance(instance,
'/dev/vda', [], [],
[bdm for bdm in bdms])
self.mox.ReplayAll()
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
def test_reserve_block_device_name(self):
instance = self._create_fake_instance_obj(
params={'root_device_name': '/dev/vda'})
bdm = block_device_obj.BlockDeviceMapping(
**{'source_type': 'image', 'destination_type': 'local',
'image_id': 'fake-image-id', 'device_name': '/dev/vda',
'instance_uuid': instance.uuid})
bdm.create(self.context)
dev = self.compute.reserve_block_device_name(
self.context, instance, '/dev/vdb',
'fake-volume-id', 'virtio', 'disk')
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance.uuid)
bdms = list(bdms)
self.assertEqual(len(bdms), 2)
bdms.sort(key=operator.attrgetter('device_name'))
vol_bdm = bdms[1]
self.assertEqual(vol_bdm.source_type, 'volume')
self.assertEqual(vol_bdm.destination_type, 'volume')
self.assertEqual(vol_bdm.device_name, '/dev/vdb')
self.assertEqual(vol_bdm.volume_id, 'fake-volume-id')
self.assertEqual(vol_bdm.disk_bus, 'virtio')
self.assertEqual(vol_bdm.device_type, 'disk')
class ComputeAPITestCase(BaseTestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
super(ComputeAPITestCase, self).setUp()
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
self.fake_image = {
'id': 1,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id'},
}
def fake_show(obj, context, image_id):
if image_id:
return self.fake_image
else:
raise exception.ImageNotFound(image_id=image_id)
self.fake_show = fake_show
def _run_instance(self, params=None):
instance = jsonutils.to_primitive(self._create_fake_instance(params,
services=True))
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertIsNone(instance['task_state'])
return instance, instance_uuid
def test_create_with_too_little_ram(self):
# Test an instance type with too little memory.
inst_type = flavors.get_default_flavor()
inst_type['memory_mb'] = 1
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.FlavorMemoryTooSmall,
self.compute_api.create, self.context,
inst_type, self.fake_image['id'])
# Now increase the inst_type memory and make sure all is fine.
inst_type['memory_mb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_little_disk(self):
# Test an instance type with too little disk space.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 1
self.fake_image['min_disk'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.FlavorDiskTooSmall,
self.compute_api.create, self.context,
inst_type, self.fake_image['id'])
# Now increase the inst_type disk space and make sure all is fine.
inst_type['root_gb'] = 2
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_large_image(self):
# Test an image that is too large for the flavor's root disk.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 1
self.fake_image['size'] = '1073741825'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.FlavorDiskTooSmall,
self.compute_api.create, self.context,
inst_type, self.fake_image['id'])
# Reduce image to 1 GB limit and ensure it works
self.fake_image['size'] = '1073741824'
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_just_enough_ram_and_disk(self):
# Test an instance type with just enough ram and disk space.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 2
inst_type['memory_mb'] = 2
self.fake_image['min_ram'] = 2
self.fake_image['min_disk'] = 2
self.fake_image['name'] = 'fake_name'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_no_ram_and_disk_reqs(self):
# Test an instance type with no min_ram or min_disk.
inst_type = flavors.get_default_flavor()
inst_type['root_gb'] = 1
inst_type['memory_mb'] = 1
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_deleted_image(self):
# If we're given a deleted image by glance, we should not be able to
# build from it
inst_type = flavors.get_default_flavor()
self.fake_image['name'] = 'fake_name'
self.fake_image['status'] = 'DELETED'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
expected_message = (
exception.ImageNotActive.msg_fmt % {'image_id':
self.fake_image['id']})
with testtools.ExpectedException(exception.ImageNotActive,
expected_message):
self.compute_api.create(self.context, inst_type,
self.fake_image['id'])
def test_create_instance_defaults_display_name(self):
# Verify that instances created without a display_name get a default one.
cases = [dict(), dict(display_name=None)]
for instance in cases:
(ref, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(),
'fake-image-uuid', **instance)
try:
self.assertIsNotNone(ref[0]['display_name'])
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_sets_system_metadata(self):
# Make sure image properties are copied into system metadata.
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='fake-image-uuid')
try:
sys_metadata = db.instance_system_metadata_get(self.context,
ref[0]['uuid'])
image_props = {'image_kernel_id': 'fake_kernel_id',
'image_ramdisk_id': 'fake_ramdisk_id',
'image_something_else': 'meow', }
for key, value in image_props.iteritems():
self.assertIn(key, sys_metadata)
self.assertEqual(value, sys_metadata[key])
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_saves_type_in_system_metadata(self):
instance_type = flavors.get_default_flavor()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_type,
image_href='some-fake-image')
try:
sys_metadata = db.instance_system_metadata_get(self.context,
ref[0]['uuid'])
instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb',
'ephemeral_gb', 'flavorid', 'swap',
'rxtx_factor', 'vcpu_weight']
for key in instance_type_props:
sys_meta_key = "instance_type_%s" % key
self.assertIn(sys_meta_key, sys_metadata)
self.assertEqual(str(instance_type[key]),
str(sys_metadata[sys_meta_key]))
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_associates_security_groups(self):
# Make sure create associates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='some-fake-image',
security_group=['testgroup'])
try:
self.assertEqual(len(db.security_group_get_by_instance(
self.context, ref[0]['uuid'])), 1)
group = db.security_group_get(self.context, group['id'])
self.assertEqual(1, len(group['instances']))
finally:
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_with_invalid_security_group_raises(self):
instance_type = flavors.get_default_flavor()
pre_build_len = len(db.instance_get_all(self.context))
self.assertRaises(exception.SecurityGroupNotFoundForProject,
self.compute_api.create,
self.context,
instance_type=instance_type,
image_href=None,
security_group=['this_is_a_fake_sec_group'])
self.assertEqual(pre_build_len,
len(db.instance_get_all(self.context)))
def test_create_with_large_user_data(self):
# Test an instance type with too much user data.
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceUserDataTooLarge,
self.compute_api.create, self.context, inst_type,
self.fake_image['id'], user_data=('1' * 65536))
def test_create_with_malformed_user_data(self):
# Test an instance type with malformed user data.
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.InstanceUserDataMalformed,
self.compute_api.create, self.context, inst_type,
self.fake_image['id'], user_data='banana')
def test_create_with_base64_user_data(self):
# Test an instance type with an acceptable amount of user data.
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
# NOTE(mikal): a string of length 48510 encodes to 65532 characters of
# base64
(refs, resv_id) = self.compute_api.create(
self.context, inst_type, self.fake_image['id'],
user_data=base64.encodestring('1' * 48510))
db.instance_destroy(self.context, refs[0]['uuid'])
def test_populate_instance_for_create(self):
base_options = {'image_ref': self.fake_image['id'],
'system_metadata': {'fake': 'value'}}
instance = instance_obj.Instance()
instance.update(base_options)
inst_type = flavors.get_flavor_by_name("m1.tiny")
instance = self.compute_api._populate_instance_for_create(
instance,
self.fake_image,
1,
security_groups=None,
instance_type=inst_type)
self.assertEqual(str(base_options['image_ref']),
instance['system_metadata']['image_base_image_ref'])
self.assertEqual(vm_states.BUILDING, instance['vm_state'])
self.assertEqual(task_states.SCHEDULING, instance['task_state'])
self.assertEqual(1, instance['launch_index'])
self.assertIsNotNone(instance.get('uuid'))
self.assertEqual([], instance.security_groups.objects)
def test_default_hostname_generator(self):
fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
orig_populate = self.compute_api._populate_instance_for_create
def _fake_populate(base_options, *args, **kwargs):
base_options['uuid'] = fake_uuids.pop(0)
return orig_populate(base_options, *args, **kwargs)
self.stubs.Set(self.compute_api,
'_populate_instance_for_create',
_fake_populate)
cases = [(None, 'server-%s' % fake_uuids[0]),
('Hello, Server!', 'hello-server'),
('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
('hello_server', 'hello-server')]
for display_name, hostname in cases:
(ref, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
display_name=display_name)
try:
self.assertEqual(ref[0]['hostname'], hostname)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def test_instance_create_adds_to_instance_group(self):
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
group = instance_group_obj.InstanceGroup(self.context)
group.uuid = str(uuid.uuid4())
group.create()
inst_type = flavors.get_default_flavor()
(refs, resv_id) = self.compute_api.create(
self.context, inst_type, self.fake_image['id'],
scheduler_hints={'group': group.uuid})
group = instance_group_obj.InstanceGroup.get_by_uuid(self.context,
group.uuid)
self.assertIn(refs[0]['uuid'], group.members)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_instance_create_auto_creates_group(self):
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
inst_type = flavors.get_default_flavor()
(refs, resv_id) = self.compute_api.create(
self.context, inst_type, self.fake_image['id'],
scheduler_hints={'group': 'groupname'})
group = instance_group_obj.InstanceGroup.get_by_name(self.context,
'groupname')
self.assertEqual('groupname', group.name)
self.assertIn('legacy', group.policies)
self.assertEqual(1, len(group.members))
self.assertIn(refs[0]['uuid'], group.members)
# On a second instance, make sure it gets added to the group that was
# auto-created above
(refs2, resv_id) = self.compute_api.create(
self.context, inst_type, self.fake_image['id'],
scheduler_hints={'group': 'groupname'})
group = instance_group_obj.InstanceGroup.get_by_name(self.context,
'groupname')
self.assertEqual('groupname', group.name)
self.assertIn('legacy', group.policies)
self.assertEqual(2, len(group.members))
self.assertIn(refs[0]['uuid'], group.members)
self.assertIn(refs2[0]['uuid'], group.members)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_destroy_instance_disassociates_security_groups(self):
# Make sure destroying disassociates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='some-fake-image',
security_group=['testgroup'])
try:
db.instance_destroy(self.context, ref[0]['uuid'])
group = db.security_group_get(self.context, group['id'])
self.assertEqual(len(group['instances']), 0)
finally:
db.security_group_destroy(self.context, group['id'])
def test_destroy_security_group_disassociates_instances(self):
# Make sure destroying security groups disassociates instances.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=flavors.get_default_flavor(),
image_href='some-fake-image',
security_group=['testgroup'])
try:
db.security_group_destroy(self.context, group['id'])
admin_deleted_context = context.get_admin_context(
read_deleted="only")
group = db.security_group_get(admin_deleted_context, group['id'])
self.assertEqual(len(group['instances']), 0)
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
def _test_rebuild(self, vm_state):
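# Common rebuild scenario: boot an instance, seed its system metadata,
# then rebuild with a new image ref/password and verify the task state
# and the resulting image system metadata.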
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
instance = instance_obj.Instance.get_by_uuid(self.context,
instance_uuid)
self.assertIsNone(instance.task_state)
# Set some image metadata that should get wiped out and reset
# as well as some other metadata that should be preserved.
instance.system_metadata.update({
'image_kernel_id': 'old-data',
'image_ramdisk_id': 'old_data',
'image_something_else': 'old-data',
'image_should_remove': 'bye-bye',
'preserved': 'preserve this!'})
instance.save()
# Make sure Compute API updates the image_ref before casting to
# compute manager.
orig_update = self.compute_api.update
info = {'image_ref': None, 'clean': False}
def fake_rpc_rebuild(context, **kwargs):
info['image_ref'] = kwargs['instance'].image_ref
info['clean'] = kwargs['instance'].obj_what_changed() == set()
self.stubs.Set(self.compute_api.compute_rpcapi, 'rebuild_instance',
fake_rpc_rebuild)
image_ref = instance["image_ref"] + '-new_image_ref'
password = "new_password"
instance.vm_state = vm_state
instance.save()
self.compute_api.rebuild(self.context, instance, image_ref, password)
self.assertEqual(info['image_ref'], image_ref)
self.assertTrue(info['clean'])
instance.refresh()
self.assertEqual(instance.task_state, task_states.REBUILDING)
sys_meta = dict([(k, v) for k, v in instance.system_metadata.items()
if not k.startswith('instance_type')])
self.assertEqual(sys_meta,
{'image_kernel_id': 'fake_kernel_id',
'image_min_disk': '1',
'image_ramdisk_id': 'fake_ramdisk_id',
'image_something_else': 'meow',
'preserved': 'preserve this!'})
instance.destroy()
def test_rebuild(self):
self._test_rebuild(vm_state=vm_states.ACTIVE)
def test_rebuild_in_error_state(self):
self._test_rebuild(vm_state=vm_states.ERROR)
def test_rebuild_in_error_not_launched(self):
instance = jsonutils.to_primitive(
self._create_fake_instance(params={'image_ref': ''}))
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
db.instance_update(self.context, instance['uuid'],
{"vm_state": vm_states.ERROR,
"launched_at": None})
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.rebuild,
self.context,
instance,
instance['image_ref'],
"new password")
def test_rebuild_no_image(self):
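# Rebuilding an instance that was booted without an image ref should
# still move it into the REBUILDING task state.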
instance = self._create_fake_instance_obj(params={'image_ref': ''})
instance_uuid = instance.uuid
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
self.compute_api.rebuild(self.context, instance, '', 'new_password')
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.REBUILDING)
def test_rebuild_with_deleted_image(self):
# If we're given a deleted image by glance, we should not be able to
# rebuild from it
instance = self._create_fake_instance_obj(params={'image_ref': '1'})
self.fake_image['name'] = 'fake_name'
self.fake_image['status'] = 'DELETED'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
expected_message = (
exception.ImageNotActive.msg_fmt % {'image_id':
self.fake_image['id']})
with testtools.ExpectedException(exception.ImageNotActive,
expected_message):
self.compute_api.rebuild(self.context, instance,
self.fake_image['id'], 'new_password')
def test_rebuild_with_too_little_ram(self):
instance = self._create_fake_instance_obj(params={'image_ref': '1'})
def fake_extract_flavor(_inst, prefix):
self.assertEqual('', prefix)
return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['min_ram'] = 128
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.FlavorMemoryTooSmall,
self.compute_api.rebuild, self.context,
instance, self.fake_image['id'], 'new_password')
# Reduce image memory requirements and make sure it works
self.fake_image['min_ram'] = 64
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_too_little_disk(self):
instance = self._create_fake_instance_obj(params={'image_ref': '1'})
def fake_extract_flavor(_inst, prefix):
self.assertEqual('', prefix)
return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['min_disk'] = 2
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.FlavorDiskTooSmall,
self.compute_api.rebuild, self.context,
instance, self.fake_image['id'], 'new_password')
# Reduce image disk requirements and make sure it works
self.fake_image['min_disk'] = 1
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_just_enough_ram_and_disk(self):
instance = self._create_fake_instance_obj(params={'image_ref': '1'})
def fake_extract_flavor(_inst, prefix):
self.assertEqual('', prefix)
return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['min_ram'] = 64
self.fake_image['min_disk'] = 1
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_no_ram_and_disk_reqs(self):
instance = self._create_fake_instance_obj(params={'image_ref': '1'})
def fake_extract_flavor(_inst, prefix):
self.assertEqual('', prefix)
return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_rebuild_with_too_large_image(self):
instance = self._create_fake_instance_obj(params={'image_ref': '1'})
def fake_extract_flavor(_inst, prefix):
self.assertEqual('', prefix)
return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
self.stubs.Set(flavors, 'extract_flavor',
fake_extract_flavor)
self.fake_image['size'] = '1073741825'
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
self.assertRaises(exception.FlavorDiskTooSmall,
self.compute_api.rebuild, self.context,
instance, self.fake_image['id'], 'new_password')
# Reduce image to 1 GB limit and ensure it works
self.fake_image['size'] = '1073741824'
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
db.instance_destroy(self.context, instance['uuid'])
def test_hostname_create(self):
# Ensure instance hostname is set during creation.
inst_type = flavors.get_flavor_by_name('m1.tiny')
(instances, _) = self.compute_api.create(self.context,
inst_type,
image_href='some-fake-image',
display_name='test host')
self.assertEqual('test-host', instances[0]['hostname'])
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertIsNone(inst_ref['task_state'])
def fake_set_admin_password(self, context, **kwargs):
pass
self.stubs.Set(compute_rpcapi.ComputeAPI,
'set_admin_password',
fake_set_admin_password)
self.compute_api.set_admin_password(self.context, inst_ref)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'],
task_states.UPDATING_PASSWORD)
self.compute.terminate_instance(self.context,
self._objectify(inst_ref), [], [])
def test_rescue_unrescue(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertIsNone(instance['task_state'])
self.compute_api.rescue(self.context, self._objectify(instance))
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], task_states.RESCUING)
params = {'vm_state': vm_states.RESCUED, 'task_state': None}
db.instance_update(self.context, instance_uuid, params)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.unrescue(self.context, self._objectify(instance))
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.RESCUED)
self.assertEqual(instance['task_state'], task_states.UNRESCUING)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def _fake_rescue_block_devices(self, instance, status="in-use"):
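# Build a fake boot-from-volume BDM list plus a matching volume dict
# for the rescue tests below.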
fake_bdms = block_device_obj.block_device_make_list(self.context,
[fake_block_device.FakeDbBlockDeviceDict(
{'device_name': '/dev/vda',
'source_type': 'volume',
'boot_index': 0,
'destination_type': 'volume',
'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'})])
volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66',
'state': 'active', 'instance_uuid': instance['uuid']}
return fake_bdms, volume
@mock.patch.object(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
@mock.patch.object(cinder.API, 'get')
def test_rescue_volume_backed_no_image(self, mock_get_vol, mock_get_bdms):
# Instance started without an image
volume_backed_inst_1 = jsonutils.to_primitive(
self._create_fake_instance({'image_ref': ''}))
bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_1)
mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
mock_get_bdms.return_value = bdms
with mock.patch.object(self.compute, '_prep_block_device'):
self.compute.run_instance(self.context,
volume_backed_inst_1, {}, {}, None, None,
None, True, None, False)
self.assertRaises(exception.InstanceNotRescuable,
self.compute_api.rescue, self.context,
volume_backed_inst_1)
self.compute.terminate_instance(self.context,
self._objectify(volume_backed_inst_1), [], [])
@mock.patch.object(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
@mock.patch.object(cinder.API, 'get')
def test_rescue_volume_backed_placeholder_image(self,
mock_get_vol,
mock_get_bdms):
# Instance started with a placeholder image (for metadata)
volume_backed_inst_2 = jsonutils.to_primitive(
self._create_fake_instance(
{'image_ref': 'my_placeholder_img',
'root_device_name': '/dev/vda'})
)
bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_2)
mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
mock_get_bdms.return_value = bdms
with mock.patch.object(self.compute, '_prep_block_device'):
self.compute.run_instance(self.context,
volume_backed_inst_2, {}, {}, None, None,
None, True, None, False)
self.assertRaises(exception.InstanceNotRescuable,
self.compute_api.rescue, self.context,
volume_backed_inst_2)
self.compute.terminate_instance(self.context,
self._objectify(volume_backed_inst_2), [], [])
def test_get(self):
# Test get instance.
exp_instance = self._create_fake_instance()
# NOTE(danms): Transform the db object the same way the
# API method will.
expected = obj_base.obj_to_primitive(
instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), exp_instance,
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(_context, _instance_uuid,
columns_to_join=None, use_slave=False):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
instance = self.compute_api.get(self.context, exp_instance['uuid'])
self.assertEqual(unify_instance(expected),
unify_instance(instance))
def test_get_with_admin_context(self):
# Test get instance.
c = context.get_admin_context()
exp_instance = self._create_fake_instance()
# NOTE(danms): Transform the db object the same way the
# API method will.
expected = obj_base.obj_to_primitive(
instance_obj.Instance._from_db_object(
c, instance_obj.Instance(), exp_instance,
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(context, instance_uuid,
columns_to_join=None, use_slave=False):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
instance = self.compute_api.get(c, exp_instance['uuid'])
self.assertEqual(unify_instance(expected),
unify_instance(instance))
def test_get_with_integer_id(self):
# Test get instance with an integer id.
exp_instance = self._create_fake_instance()
# NOTE(danms): Transform the db object the same way the
# API method will.
expected = obj_base.obj_to_primitive(
instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), exp_instance,
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(_context, _instance_id, columns_to_join=None):
return exp_instance
self.stubs.Set(db, 'instance_get', fake_db_get)
instance = self.compute_api.get(self.context, exp_instance['id'])
self.assertEqual(unify_instance(expected),
unify_instance(instance))
def test_get_all_by_name_regexp(self):
# Test searching instances by name (display_name).
c = context.get_admin_context()
instance1 = self._create_fake_instance({'display_name': 'woot'})
instance2 = self._create_fake_instance({
'display_name': 'woo'})
instance3 = self._create_fake_instance({
'display_name': 'not-woot'})
instances = self.compute_api.get_all(c,
search_opts={'name': '^woo.*'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertIn(instance1['uuid'], instance_uuids)
self.assertIn(instance2['uuid'], instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '^woot.*'})
instance_uuids = [instance['uuid'] for instance in instances]
self.assertEqual(len(instances), 1)
self.assertIn(instance1['uuid'], instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '.*oot.*'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertIn(instance1['uuid'], instance_uuids)
self.assertIn(instance3['uuid'], instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': '^n.*'})
self.assertEqual(len(instances), 1)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertIn(instance3['uuid'], instance_uuids)
instances = self.compute_api.get_all(c,
search_opts={'name': 'noth.*'})
self.assertEqual(len(instances), 0)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_all_by_multiple_options_at_once(self, fixed_get, network_get):
# Test searching by multiple options at once.
c = context.get_admin_context()
network_manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = (
network_manager.db.fixed_ips_by_virtual_interface)
network_get.return_value = (
dict(test_network.fake_network,
**network_manager.db.network_get(None, 1)))
self.stubs.Set(self.compute_api.network_api,
'get_instance_uuids_by_ip_filter',
network_manager.get_instance_uuids_by_ip_filter)
instance1 = self._create_fake_instance({
'display_name': 'woot',
'id': 1,
'uuid': '00000000-0000-0000-0000-000000000010'})
instance2 = self._create_fake_instance({
'display_name': 'woo',
'id': 20,
'uuid': '00000000-0000-0000-0000-000000000020'})
instance3 = self._create_fake_instance({
'display_name': 'not-woot',
'id': 30,
'uuid': '00000000-0000-0000-0000-000000000030'})
# The ip filter ends up matching on the 2nd octet here, so all 3
# instances match on ip, but 'name' only matches one
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1', 'name': 'not.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance3['uuid'])
# The ip filter matches any address with a '1' in the last octet,
# so instances 1 and 3 match on ip, but 'name' only matches #1
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
# same as above but no match on name (name matches instance1
# but the ip query doesn't)
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
self.assertEqual(len(instances), 0)
# ip matches all 3... ipv6 matches #2+#3...name matches #3
instances = self.compute_api.get_all(c,
search_opts={'ip': '.*\.1',
'name': 'not.*',
'ip6': '^.*12.*34.*'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance3['uuid'])
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_image(self):
# Test searching instances by image.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'image_ref': '1234'})
instance2 = self._create_fake_instance({'image_ref': '4567'})
instance3 = self._create_fake_instance({'image_ref': '4567'})
instances = self.compute_api.get_all(c, search_opts={'image': '123'})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertIn(instance2['uuid'], instance_uuids)
self.assertIn(instance3['uuid'], instance_uuids)
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
search_opts={'image': ['1234', '4567']})
self.assertEqual(len(instances), 3)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_flavor(self):
# Test searching instances by flavor.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'instance_type_id': 1})
instance2 = self._create_fake_instance({'instance_type_id': 2})
instance3 = self._create_fake_instance({'instance_type_id': 2})
# NOTE(comstud): Migrations set up the instance_types table
# for us. Therefore, we assume the following is true for
# these tests:
# instance_type_id 1 == flavor 3
# instance_type_id 2 == flavor 1
# instance_type_id 3 == flavor 4
# instance_type_id 4 == flavor 5
# instance_type_id 5 == flavor 2
instances = self.compute_api.get_all(c,
search_opts={'flavor': 5})
self.assertEqual(len(instances), 0)
# ensure unknown filter maps to an exception
self.assertRaises(exception.FlavorNotFound,
self.compute_api.get_all, c,
search_opts={'flavor': 99})
instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['id'], instance1['id'])
instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertIn(instance2['uuid'], instance_uuids)
self.assertIn(instance3['uuid'], instance_uuids)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_state(self):
# Test searching instances by state.
c = context.get_admin_context()
instance1 = self._create_fake_instance({
'power_state': power_state.SHUTDOWN,
})
instance2 = self._create_fake_instance({
'power_state': power_state.RUNNING,
})
instance3 = self._create_fake_instance({
'power_state': power_state.RUNNING,
})
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.SUSPENDED})
self.assertEqual(len(instances), 0)
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.SHUTDOWN})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance1['uuid'])
instances = self.compute_api.get_all(c,
search_opts={'power_state': power_state.RUNNING})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertIn(instance2['uuid'], instance_uuids)
self.assertIn(instance3['uuid'], instance_uuids)
# Test passing a list as search arg
instances = self.compute_api.get_all(c,
search_opts={'power_state': [power_state.SHUTDOWN,
power_state.RUNNING]})
self.assertEqual(len(instances), 3)
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_metadata(self):
# Test searching instances by metadata.
c = context.get_admin_context()
instance0 = self._create_fake_instance()
instance1 = self._create_fake_instance({
'metadata': {'key1': 'value1'}})
instance2 = self._create_fake_instance({
'metadata': {'key2': 'value2'}})
instance3 = self._create_fake_instance({
'metadata': {'key3': 'value3'}})
instance4 = self._create_fake_instance({
'metadata': {'key3': 'value3',
'key4': 'value4'}})
# get all instances
instances = self.compute_api.get_all(c,
search_opts={'metadata': {}})
self.assertEqual(len(instances), 5)
# wrong key/value combination
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key1': 'value3'}})
self.assertEqual(len(instances), 0)
# non-existing keys
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key5': 'value1'}})
self.assertEqual(len(instances), 0)
# find existing instance
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key2': 'value2'}})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance2['uuid'])
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key3': 'value3'}})
self.assertEqual(len(instances), 2)
instance_uuids = [instance['uuid'] for instance in instances]
self.assertIn(instance3['uuid'], instance_uuids)
self.assertIn(instance4['uuid'], instance_uuids)
# multiple criteria as a dict
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key3': 'value3',
'key4': 'value4'}})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance4['uuid'])
# multiple criteria as a list
instances = self.compute_api.get_all(c,
search_opts={'metadata': [{'key4': 'value4'},
{'key3': 'value3'}]})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance4['uuid'])
db.instance_destroy(c, instance0['uuid'])
db.instance_destroy(c, instance1['uuid'])
db.instance_destroy(c, instance2['uuid'])
db.instance_destroy(c, instance3['uuid'])
db.instance_destroy(c, instance4['uuid'])
def test_all_instance_metadata(self):
instance1 = self._create_fake_instance({'metadata': {'key1': 'value1'},
'user_id': 'user1',
'project_id': 'project1'})
instance2 = self._create_fake_instance({'metadata': {'key2': 'value2'},
'user_id': 'user2',
'project_id': 'project2'})
_context = self.context
_context.user_id = 'user1'
_context.project_id = 'project1'
metadata = self.compute_api.get_all_instance_metadata(_context,
search_filts=[])
self.assertEqual(len(metadata), 1)
self.assertEqual(metadata[0]['key'], 'key1')
_context.user_id = 'user2'
_context.project_id = 'project2'
metadata = self.compute_api.get_all_instance_metadata(_context,
search_filts=[])
self.assertEqual(len(metadata), 1)
self.assertEqual(metadata[0]['key'], 'key2')
_context = context.get_admin_context()
metadata = self.compute_api.get_all_instance_metadata(_context,
search_filts=[])
self.assertEqual(len(metadata), 2)
def test_instance_metadata(self):
meta_changes = [None]
self.flags(notify_on_state_change='vm_state')
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
_context = context.get_admin_context()
instance = self._create_fake_instance_obj({'metadata':
{'key1': 'value1'}})
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key1': 'value1'})
self.compute_api.update_instance_metadata(_context, instance,
{'key2': 'value2'})
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
msg = fake_notifier.NOTIFICATIONS[0]
payload = msg.payload
self.assertIn('metadata', payload)
self.assertEqual(payload['metadata'], metadata)
new_metadata = {'key2': 'bah', 'key3': 'value3'}
self.compute_api.update_instance_metadata(_context, instance,
new_metadata, delete=True)
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, new_metadata)
self.assertEqual(meta_changes, [{
'key1': ['-'],
'key2': ['+', 'bah'],
'key3': ['+', 'value3'],
}])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[1]
payload = msg.payload
self.assertIn('metadata', payload)
self.assertEqual(payload['metadata'], metadata)
self.compute_api.delete_instance_metadata(_context, instance, 'key2')
metadata = self.compute_api.get_instance_metadata(_context, instance)
self.assertEqual(metadata, {'key3': 'value3'})
self.assertEqual(meta_changes, [{'key2': ['-']}])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
msg = fake_notifier.NOTIFICATIONS[2]
payload = msg.payload
self.assertIn('metadata', payload)
self.assertEqual(payload['metadata'], {'key3': 'value3'})
db.instance_destroy(_context, instance['uuid'])
def test_disallow_metadata_changes_during_building(self):
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
pass
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
instance = self._create_fake_instance({'vm_state': vm_states.BUILDING})
instance = dict(instance)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.delete_instance_metadata, self.context,
instance, "key")
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.update_instance_metadata, self.context,
instance, "key")
def test_get_instance_faults(self):
# Get an instance's latest fault.
instance = self._create_fake_instance()
fault_fixture = {
'code': 404,
'instance_uuid': instance['uuid'],
'message': "HTTPNotFound",
'details': "Stock details for test",
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
}
def return_fault(_ctxt, instance_uuids):
return dict.fromkeys(instance_uuids, [fault_fixture])
self.stubs.Set(nova.db,
'instance_fault_get_by_instance_uuids',
return_fault)
_context = context.get_admin_context()
output = self.compute_api.get_instance_faults(_context, [instance])
expected = {instance['uuid']: [fault_fixture]}
self.assertEqual(output, expected)
db.instance_destroy(_context, instance['uuid'])
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
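# Reduce a DB BDM record to just the attributes that are actually set,
# for easier comparison in tests.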
attr_list = ('delete_on_termination', 'device_name', 'no_device',
'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
bdm = {}
for attr in attr_list:
val = bdm_ref.get(attr, None)
if val:
bdm[attr] = val
return bdm
def test_update_block_device_mapping(self):
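# Exercise _prepare_image_mapping and _update_block_device_mapping:
# image mappings (root/swap/ephemeral) are stored first, then explicit
# block device mappings overwrite or extend them.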
swap_size = ephemeral_size = 1
instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
instance = self._create_fake_instance()
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
{'virtual': 'root', 'device': '/dev/sda1'},
{'virtual': 'swap', 'device': 'sdb4'},
{'virtual': 'swap', 'device': 'sdb3'},
{'virtual': 'swap', 'device': 'sdb2'},
{'virtual': 'swap', 'device': 'sdb1'},
{'virtual': 'ephemeral0', 'device': 'sdc1'},
{'virtual': 'ephemeral1', 'device': 'sdc2'},
{'virtual': 'ephemeral2', 'device': 'sdc3'}]
block_device_mapping = [
# root
{'device_name': '/dev/sda1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'delete_on_termination': False},
# overwrite swap
{'device_name': '/dev/sdb2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
{'device_name': '/dev/sdb4',
'no_device': True},
# overwrite ephemeral
{'device_name': '/dev/sdc1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False},
{'device_name': '/dev/sdc2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
{'device_name': '/dev/sdc4',
'no_device': True},
# volume
{'device_name': '/dev/sdd1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
{'device_name': '/dev/sdd3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
{'device_name': '/dev/sdd4',
'no_device': True}]
image_mapping = self.compute_api._prepare_image_mapping(
instance_type, instance['uuid'], mappings)
self.compute_api._update_block_device_mapping(
self.context, instance_type, instance['uuid'], image_mapping)
bdms = [block_device.BlockDeviceDict(bdm) for bdm in
db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])]
expected_result = [
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size, 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc3', 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc1', 'delete_on_termination': True},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': CONF.default_ephemeral_format,
'device_name': '/dev/sdc2', 'delete_on_termination': True},
]
bdms.sort(key=operator.itemgetter('device_name'))
expected_result.sort(key=operator.itemgetter('device_name'))
self.assertEqual(len(bdms), len(expected_result))
for expected, got in zip(expected_result, bdms):
self.assertThat(expected, matchers.IsSubDictOf(got))
self.compute_api._update_block_device_mapping(
self.context, flavors.get_default_flavor(),
instance['uuid'], block_device_mapping)
bdms = [block_device.BlockDeviceDict(bdm) for bdm in
db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])]
expected_result = [
{'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
'device_name': '/dev/sda1'},
{'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'device_name': '/dev/sdb1',
'volume_size': swap_size, 'delete_on_termination': True},
{'device_name': '/dev/sdb2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False},
{'device_name': '/dev/sdc2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
{'no_device': True, 'device_name': '/dev/sdc4'},
{'device_name': '/dev/sdd1',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
{'device_name': '/dev/sdd3',
'source_type': 'snapshot', 'destination_type': 'volume',
'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
{'no_device': True, 'device_name': '/dev/sdd4'}]
bdms.sort(key=operator.itemgetter('device_name'))
expected_result.sort(key=operator.itemgetter('device_name'))
self.assertEqual(len(bdms), len(expected_result))
for expected, got in zip(expected_result, bdms):
self.assertThat(expected, matchers.IsSubDictOf(got))
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid']):
db.block_device_mapping_destroy(self.context, bdm['id'])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
def _test_check_and_transform_bdm(self, bdms, expected_bdms,
image_bdms=None, base_options=None,
legacy_bdms=False,
legacy_image_bdms=False):
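# Helper: run _check_and_transform_bdm with the given API-supplied and
# image-defined BDMs and assert the transformed list matches
# expected_bdms.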
image_bdms = image_bdms or []
image_meta = {}
if image_bdms:
image_meta = {'properties': {'block_device_mapping': image_bdms}}
if not legacy_image_bdms:
image_meta['properties']['bdm_v2'] = True
base_options = base_options or {'root_device_name': 'vda',
'image_ref': FAKE_IMAGE_REF}
transformed_bdm = self.compute_api._check_and_transform_bdm(
base_options, image_meta, 1, 1, bdms, legacy_bdms)
self.assertThat(expected_bdms,
matchers.DictListMatches(transformed_bdm))
def test_check_and_transform_legacy_bdm_no_image_bdms(self):
legacy_bdms = [
{'device_name': '/dev/vda',
'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False}]
expected_bdms = [block_device.BlockDeviceDict.from_legacy(
legacy_bdms[0])]
expected_bdms[0]['boot_index'] = 0
self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
legacy_bdms=True)
def test_check_and_transform_legacy_bdm_legacy_image_bdms(self):
image_bdms = [
{'device_name': '/dev/vda',
'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False}]
legacy_bdms = [
{'device_name': '/dev/vdb',
'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False}]
expected_bdms = [
block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
block_device.BlockDeviceDict.from_legacy(image_bdms[0])]
expected_bdms[0]['boot_index'] = -1
expected_bdms[1]['boot_index'] = 0
self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
image_bdms=image_bdms,
legacy_bdms=True,
legacy_image_bdms=True)
def test_check_and_transform_legacy_bdm_image_bdms(self):
legacy_bdms = [
{'device_name': '/dev/vdb',
'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
'delete_on_termination': False}]
image_bdms = [block_device.BlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
'boot_index': 0})]
expected_bdms = [
block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
image_bdms[0]]
expected_bdms[0]['boot_index'] = -1
self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
image_bdms=image_bdms,
legacy_bdms=True)
def test_check_and_transform_bdm_no_image_bdms(self):
bdms = [block_device.BlockDeviceDict({'source_type': 'image',
'destination_type': 'local',
'image_id': FAKE_IMAGE_REF,
'boot_index': 0})]
expected_bdms = bdms
self._test_check_and_transform_bdm(bdms, expected_bdms)
def test_check_and_transform_bdm_image_bdms(self):
bdms = [block_device.BlockDeviceDict({'source_type': 'image',
'destination_type': 'local',
'image_id': FAKE_IMAGE_REF,
'boot_index': 0})]
image_bdms = [block_device.BlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': '33333333-aaaa-bbbb-cccc-444444444444'})]
expected_bdms = bdms + image_bdms
self._test_check_and_transform_bdm(bdms, expected_bdms,
image_bdms=image_bdms)
def test_check_and_transform_bdm_legacy_image_bdms(self):
bdms = [block_device.BlockDeviceDict({'source_type': 'image',
'destination_type': 'local',
'image_id': FAKE_IMAGE_REF,
'boot_index': 0})]
image_bdms = [{'device_name': '/dev/vda',
'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False}]
expected_bdms = [block_device.BlockDeviceDict.from_legacy(
image_bdms[0])]
expected_bdms[0]['boot_index'] = 0
self._test_check_and_transform_bdm(bdms, expected_bdms,
image_bdms=image_bdms,
legacy_image_bdms=True)
def test_check_and_transform_image(self):
base_options = {'root_device_name': 'vdb',
'image_ref': FAKE_IMAGE_REF}
fake_legacy_bdms = [
{'device_name': '/dev/vda',
'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
'delete_on_termination': False}]
image_meta = {'properties': {'block_device_mapping': [
{'device_name': '/dev/vda',
'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333'}]}}
# We get an image BDM
transformed_bdm = self.compute_api._check_and_transform_bdm(
base_options, {}, 1, 1, fake_legacy_bdms, True)
self.assertEqual(len(transformed_bdm), 2)
# No image BDM created if image already defines a root BDM
base_options['root_device_name'] = 'vda'
transformed_bdm = self.compute_api._check_and_transform_bdm(
base_options, image_meta, 1, 1, [], True)
self.assertEqual(len(transformed_bdm), 1)
# No image BDM created
transformed_bdm = self.compute_api._check_and_transform_bdm(
base_options, {}, 1, 1, fake_legacy_bdms, True)
self.assertEqual(len(transformed_bdm), 1)
# Using the same volume for multiple instances fails
self.assertRaises(exception.InvalidRequest,
self.compute_api._check_and_transform_bdm,
base_options, {}, 1, 2, fake_legacy_bdms, True)
checked_bdm = self.compute_api._check_and_transform_bdm(
base_options, {}, 1, 1, transformed_bdm, True)
self.assertEqual(checked_bdm, transformed_bdm)
def test_volume_size(self):
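# _volume_size should use the flavor's swap/ephemeral sizes unless the
# BDM specifies its own volume_size.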
ephemeral_size = 2
swap_size = 3
volume_size = 5
swap_bdm = {'source_type': 'blank', 'guest_format': 'swap'}
ephemeral_bdm = {'source_type': 'blank', 'guest_format': None}
volume_bdm = {'source_type': 'volume', 'volume_size': volume_size}
inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}
self.assertEqual(
self.compute_api._volume_size(inst_type, ephemeral_bdm),
ephemeral_size)
ephemeral_bdm['volume_size'] = 42
self.assertEqual(
self.compute_api._volume_size(inst_type, ephemeral_bdm), 42)
self.assertEqual(
self.compute_api._volume_size(inst_type, swap_bdm),
swap_size)
swap_bdm['volume_size'] = 42
self.assertEqual(
self.compute_api._volume_size(inst_type, swap_bdm), 42)
self.assertEqual(
self.compute_api._volume_size(inst_type, volume_bdm),
volume_size)
def test_is_volume_backed_instance(self):
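# An instance counts as volume backed when it has no image ref, or when
# its boot device (boot_index 0) is a volume- or snapshot-backed BDM
# with a volume destination.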
ctxt = self.context
instance = self._create_fake_instance({'image_ref': ''})
self.assertTrue(
self.compute_api.is_volume_backed_instance(ctxt, instance, None))
instance = self._create_fake_instance({'root_device_name': 'vda'})
self.assertFalse(
self.compute_api.is_volume_backed_instance(
ctxt, instance,
block_device_obj.block_device_make_list(ctxt, [])))
bdms = block_device_obj.block_device_make_list(ctxt,
[fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume',
'device_name': '/dev/vda',
'volume_id': 'fake_volume_id',
'boot_index': 0,
'destination_type': 'volume'})])
self.assertTrue(
self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
bdms = block_device_obj.block_device_make_list(ctxt,
[fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume',
'device_name': '/dev/vda',
'volume_id': 'fake_volume_id',
'destination_type': 'local',
'boot_index': 0,
'snapshot_id': None}),
fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume',
'device_name': '/dev/vdb',
'boot_index': 1,
'destination_type': 'volume',
'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6',
'snapshot_id': None})])
self.assertFalse(
self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
bdms = block_device_obj.block_device_make_list(ctxt,
[fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume',
'device_name': '/dev/vda',
'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6',
'destination_type': 'volume',
'boot_index': 0,
'volume_id': None})])
self.assertTrue(
self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
def test_is_volume_backed_instance_no_bdms(self):
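# When no BDMs are passed in, is_volume_backed_instance should look
# them up by instance uuid.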
ctxt = self.context
instance = self._create_fake_instance()
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance['uuid']).AndReturn(
block_device_obj.block_device_make_list(ctxt, []))
self.mox.ReplayAll()
self.compute_api.is_volume_backed_instance(ctxt, instance, None)
def test_reservation_id_one_instance(self):
"""Verify building an instance has a reservation_id that
matches return value from create.
"""
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image')
try:
self.assertEqual(len(refs), 1)
self.assertEqual(refs[0]['reservation_id'], resv_id)
finally:
db.instance_destroy(self.context, refs[0]['uuid'])
def test_reservation_ids_two_instances(self):
"""Verify building 2 instances at once results in a
reservation_id being returned equal to reservation id set
in both instances.
"""
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2)
try:
self.assertEqual(len(refs), 2)
self.assertIsNotNone(resv_id)
finally:
for instance in refs:
self.assertEqual(instance['reservation_id'], resv_id)
db.instance_destroy(self.context, refs[0]['uuid'])
def test_multi_instance_display_name_template(self):
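# The multi_instance_display_name_template flag controls how
# display_name and hostname are expanded when creating multiple
# instances at once.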
self.flags(multi_instance_display_name_template='%(name)s')
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2, display_name='x')
self.assertEqual(refs[0]['display_name'], 'x')
self.assertEqual(refs[0]['hostname'], 'x')
self.assertEqual(refs[1]['display_name'], 'x')
self.assertEqual(refs[1]['hostname'], 'x')
self.flags(multi_instance_display_name_template='%(name)s-%(count)s')
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2, display_name='x')
self.assertEqual(refs[0]['display_name'], 'x-1')
self.assertEqual(refs[0]['hostname'], 'x-1')
self.assertEqual(refs[1]['display_name'], 'x-2')
self.assertEqual(refs[1]['hostname'], 'x-2')
self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s')
(refs, resv_id) = self.compute_api.create(self.context,
flavors.get_default_flavor(), image_href='some-fake-image',
min_count=2, max_count=2, display_name='x')
self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid'])
self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid'])
self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid'])
self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid'])
def test_instance_architecture(self):
# Test the instance architecture.
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['architecture'], 'x86_64')
db.instance_destroy(self.context, i_ref['uuid'])
def test_instance_unknown_architecture(self):
# Test if the architecture is unknown.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'architecture': ''}))
try:
self.compute.run_instance(self.context, instance, {}, {}, None,
None, None, True, None, False)
instance = db.instance_get_by_uuid(self.context,
instance['uuid'])
self.assertNotEqual(instance['architecture'], 'Unknown')
finally:
db.instance_destroy(self.context, instance['uuid'])
def test_instance_name_template(self):
# Test the instance_name template.
self.flags(instance_name_template='instance-%d')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
db.instance_destroy(self.context, i_ref['uuid'])
self.flags(instance_name_template='instance-%(uuid)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid'])
db.instance_destroy(self.context, i_ref['uuid'])
self.flags(instance_name_template='%(id)d-%(uuid)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], '%d-%s' %
(i_ref['id'], i_ref['uuid']))
db.instance_destroy(self.context, i_ref['uuid'])
# '%(name)s' is not allowed; the name defaults to the uuid
self.flags(instance_name_template='%(name)s')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], i_ref['uuid'])
db.instance_destroy(self.context, i_ref['uuid'])
def test_add_remove_fixed_ip(self):
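# add_fixed_ip and remove_fixed_ip should succeed for an instance on
# this host; network deallocation is stubbed out for the final delete.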
instance = self._create_fake_instance(params={'host': CONF.host})
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.add_fixed_ip(self.context, self._objectify(instance),
'1')
self.compute_api.remove_fixed_ip(self.context,
self._objectify(instance),
'192.168.1.1')
self.compute_api.delete(self.context, self._objectify(instance))
def test_attach_volume_invalid(self):
self.assertRaises(exception.InvalidDevicePath,
self.compute_api.attach_volume,
self.context,
{'locked': False, 'vm_state': vm_states.ACTIVE,
'task_state': None,
'launched_at': timeutils.utcnow()},
None,
'/invalid')
def test_no_attach_volume_in_rescue_state(self):
def fake(*args, **kwargs):
pass
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake)
self.stubs.Set(cinder.API, 'reserve_volume', fake)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_volume,
self.context,
{'uuid': 'fake_uuid', 'locked': False,
'vm_state': vm_states.RESCUED},
None,
'/dev/vdb')
def test_no_attach_volume_in_suspended_state(self):
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_volume,
self.context,
{'uuid': 'fake_uuid', 'locked': False,
'vm_state': vm_states.SUSPENDED},
{'id': 'fake-volume-id'},
'/dev/vdb')
def test_no_detach_volume_in_rescue_state(self):
# Ensure a volume cannot be detached while the instance is rescued.
params = {'vm_state': vm_states.RESCUED}
instance = self._create_fake_instance(params=params)
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': instance['uuid']}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_volume,
self.context, instance, volume)
@mock.patch.object(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
@mock.patch.object(cinder.API, 'get')
def test_no_rescue_in_volume_state_attaching(self,
mock_get_vol,
mock_get_bdms):
# Make sure a VM cannot be rescued while volume is being attached
instance = self._create_fake_instance()
bdms, volume = self._fake_rescue_block_devices(instance)
mock_get_vol.return_value = {'id': volume['id'],
'status': "attaching"}
mock_get_bdms.return_value = bdms
self.assertRaises(exception.InvalidVolume,
self.compute_api.rescue, self.context, instance)
def test_vnc_console(self):
# Make sure we can get a vnc console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_console_type = "novnc"
fake_connect_info = {'token': 'fake_token',
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
'internal_access_path': 'fake_access_path',
'instance_uuid': fake_instance['uuid'],
'access_url': 'fake_console_url'}
rpcapi = compute_rpcapi.ComputeAPI
self.mox.StubOutWithMock(rpcapi, 'get_vnc_console')
rpcapi.get_vnc_console(
self.context, instance=fake_instance,
console_type=fake_console_type).AndReturn(fake_connect_info)
self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
'authorize_console')
self.compute_api.consoleauth_rpcapi.authorize_console(
self.context, 'fake_token', fake_console_type, 'fake_console_host',
'fake_console_port', 'fake_access_path', 'fake_uuid')
self.mox.ReplayAll()
console = self.compute_api.get_vnc_console(self.context,
fake_instance, fake_console_type)
self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_vnc_console_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_vnc_console,
self.context, instance, 'novnc')
db.instance_destroy(self.context, instance['uuid'])
def test_spice_console(self):
# Make sure we can get a spice console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_console_type = "spice-html5"
fake_connect_info = {'token': 'fake_token',
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
'internal_access_path': 'fake_access_path',
'instance_uuid': fake_instance['uuid'],
'access_url': 'fake_console_url'}
rpcapi = compute_rpcapi.ComputeAPI
self.mox.StubOutWithMock(rpcapi, 'get_spice_console')
rpcapi.get_spice_console(
self.context, instance=fake_instance,
console_type=fake_console_type).AndReturn(fake_connect_info)
self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
'authorize_console')
self.compute_api.consoleauth_rpcapi.authorize_console(
self.context, 'fake_token', fake_console_type, 'fake_console_host',
'fake_console_port', 'fake_access_path', 'fake_uuid')
self.mox.ReplayAll()
console = self.compute_api.get_spice_console(self.context,
fake_instance, fake_console_type)
self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_spice_console_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_spice_console,
self.context, instance, 'spice')
db.instance_destroy(self.context, instance['uuid'])
def test_rdp_console(self):
# Make sure we can get an rdp console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_console_type = "rdp-html5"
fake_connect_info = {'token': 'fake_token',
'console_type': fake_console_type,
'host': 'fake_console_host',
'port': 'fake_console_port',
'internal_access_path': 'fake_access_path',
'instance_uuid': fake_instance['uuid'],
'access_url': 'fake_console_url'}
rpcapi = compute_rpcapi.ComputeAPI
self.mox.StubOutWithMock(rpcapi, 'get_rdp_console')
rpcapi.get_rdp_console(
self.context, instance=fake_instance,
console_type=fake_console_type).AndReturn(fake_connect_info)
self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
'authorize_console')
self.compute_api.consoleauth_rpcapi.authorize_console(
self.context, 'fake_token', fake_console_type, 'fake_console_host',
'fake_console_port', 'fake_access_path', 'fake_uuid')
self.mox.ReplayAll()
console = self.compute_api.get_rdp_console(self.context,
fake_instance, fake_console_type)
self.assertEqual(console, {'url': 'fake_console_url'})
def test_get_rdp_console_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_rdp_console,
self.context, instance, 'rdp')
db.instance_destroy(self.context, instance['uuid'])
def test_console_output(self):
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
fake_tail_length = 699
fake_console_output = 'fake console output'
rpcapi = compute_rpcapi.ComputeAPI
self.mox.StubOutWithMock(rpcapi, 'get_console_output')
rpcapi.get_console_output(
self.context, instance=fake_instance,
tail_length=fake_tail_length).AndReturn(fake_console_output)
self.mox.ReplayAll()
output = self.compute_api.get_console_output(self.context,
fake_instance, tail_length=fake_tail_length)
self.assertEqual(output, fake_console_output)
def test_console_output_no_host(self):
instance = self._create_fake_instance(params={'host': ''})
self.assertRaises(exception.InstanceNotReady,
self.compute_api.get_console_output,
self.context, instance)
db.instance_destroy(self.context, instance['uuid'])
def test_attach_interface(self):
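# attach_interface should allocate a port for the instance via the
# network API; the resulting network info and port id are returned for
# reuse in test_detach_interface.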
new_type = flavors.get_flavor_by_flavor_id('4')
sys_meta = flavors.save_flavor_info({}, new_type)
instance = instance_obj.Instance(image_ref='foo',
system_metadata=sys_meta)
self.mox.StubOutWithMock(self.compute.network_api,
'allocate_port_for_instance')
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
port_id = nwinfo[0]['id']
req_ip = '1.2.3.4'
self.compute.network_api.allocate_port_for_instance(
self.context, instance, port_id, network_id, req_ip
).AndReturn(nwinfo)
self.mox.ReplayAll()
vif = self.compute.attach_interface(self.context,
instance,
network_id,
port_id,
req_ip)
self.assertEqual(vif['id'], network_id)
return nwinfo, port_id
def test_detach_interface(self):
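# detach_interface should deallocate the port and leave no interfaces
# registered with the driver.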
nwinfo, port_id = self.test_attach_interface()
self.stubs.Set(self.compute, '_get_instance_nw_info',
lambda *a, **k: nwinfo)
self.stubs.Set(self.compute.network_api,
'deallocate_port_for_instance',
lambda a, b, c: [])
instance = instance_obj.Instance()
self.compute.detach_interface(self.context, instance, port_id)
self.assertEqual(self.compute.driver._interfaces, {})
def test_attach_volume(self):
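# attach_volume should reserve a device name and the cinder volume,
# then cast attach_volume to the compute manager with the created BDM.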
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'volume_id': 'fake-volume-id', 'device_name': '/dev/vdb'})
instance = self._create_fake_instance()
fake_volume = {'id': 'fake-volume-id'}
with contextlib.nested(
mock.patch.object(cinder.API, 'get', return_value=fake_volume),
mock.patch.object(cinder.API, 'check_attach'),
mock.patch.object(cinder.API, 'reserve_volume'),
mock.patch.object(compute_rpcapi.ComputeAPI,
'reserve_block_device_name', return_value='/dev/vdb'),
mock.patch.object(db, 'block_device_mapping_get_by_volume_id',
return_value=fake_bdm),
mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
) as (mock_get, mock_check_attach, mock_reserve_vol, mock_reserve_bdm,
mock_bdm_get, mock_attach):
self.compute_api.attach_volume(
self.context, instance, 'fake-volume-id',
'/dev/vdb', 'ide', 'cdrom')
mock_reserve_bdm.assert_called_once_with(
self.context, device='/dev/vdb', instance=instance,
volume_id='fake-volume-id', disk_bus='ide',
device_type='cdrom')
mock_bdm_get.assert_called_once_with(
self.context, 'fake-volume-id', [])
self.assertEqual(mock_get.call_args,
mock.call(self.context, 'fake-volume-id'))
self.assertEqual(mock_check_attach.call_args,
mock.call(
self.context, fake_volume, instance=instance))
mock_reserve_vol.assert_called_once_with(
self.context, 'fake-volume-id')
a, kw = mock_attach.call_args
self.assertEqual(kw['volume_id'], 'fake-volume-id')
self.assertEqual(kw['mountpoint'], '/dev/vdb')
self.assertEqual(kw['bdm'].device_name, '/dev/vdb')
self.assertEqual(kw['bdm'].volume_id, 'fake-volume-id')
def test_attach_volume_no_device(self):
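# Attaching a volume without an explicit device name should still go
# through the same cinder checks and RPC reservation/attach calls.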
called = {}
def fake_check_attach(*args, **kwargs):
called['fake_check_attach'] = True
def fake_reserve_volume(*args, **kwargs):
called['fake_reserve_volume'] = True
def fake_volume_get(self, context, volume_id):
called['fake_volume_get'] = True
return {'id': volume_id}
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
def fake_rpc_reserve_block_device_name(self, context, **kwargs):
called['fake_rpc_reserve_block_device_name'] = True
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI,
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'get_by_volume_id')
block_device_obj.BlockDeviceMapping.get_by_volume_id(
self.context, mox.IgnoreArg()).AndReturn('fake-bdm')
self.mox.ReplayAll()
instance = self._create_fake_instance()
self.compute_api.attach_volume(self.context, instance, 1, device=None)
self.assertTrue(called.get('fake_check_attach'))
self.assertTrue(called.get('fake_reserve_volume'))
self.assertTrue(called.get('fake_volume_get'))
self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
self.assertTrue(called.get('fake_rpc_attach_volume'))
def test_detach_volume(self):
# Ensure volume can be detached from instance
called = {}
instance = self._create_fake_instance()
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': instance['uuid']}
def fake_check_detach(*args, **kwargs):
called['fake_check_detach'] = True
def fake_begin_detaching(*args, **kwargs):
called['fake_begin_detaching'] = True
def fake_rpc_detach_volume(self, context, **kwargs):
called['fake_rpc_detach_volume'] = True
self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
fake_rpc_detach_volume)
self.compute_api.detach_volume(self.context,
instance, volume)
self.assertTrue(called.get('fake_check_detach'))
self.assertTrue(called.get('fake_begin_detaching'))
self.assertTrue(called.get('fake_rpc_detach_volume'))
def test_detach_invalid_volume(self):
# Ensure an exception is raised while detaching an unattached volume.
instance = {'uuid': 'uuid1',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE,
'task_state': None}
volume = {'id': 1, 'attach_status': 'detached'}
self.assertRaises(exception.InvalidVolume,
self.compute_api.detach_volume, self.context,
instance, volume)
def test_detach_unattached_volume(self):
# Ensure exception is raised when volume's idea of attached
# instance doesn't match.
instance = {'uuid': 'uuid1',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE,
'task_state': None}
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': 'uuid2'}
self.assertRaises(exception.VolumeUnattached,
self.compute_api.detach_volume, self.context,
instance, volume)
def test_detach_suspended_instance_fails(self):
instance = {'uuid': 'uuid1',
'locked': False,
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.SUSPENDED,
'task_state': None}
volume = {'id': 1, 'attach_status': 'in-use',
'instance_uuid': 'uuid2'}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_volume, self.context,
instance, volume)
def test_detach_volume_libvirt_is_down(self):
# Ensure rollback during detach if libvirt goes down
called = {}
instance = self._create_fake_instance()
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'device_name': '/dev/vdb', 'volume_id': 1,
'source_type': 'snapshot', 'destination_type': 'volume',
'connection_info': '{"test": "test"}'})
def fake_libvirt_driver_instance_exists(*args, **kwargs):
called['fake_libvirt_driver_instance_exists'] = True
return False
def fake_libvirt_driver_detach_volume_fails(*args, **kwargs):
called['fake_libvirt_driver_detach_volume_fails'] = True
raise AttributeError()
def fake_roll_detaching(*args, **kwargs):
called['fake_roll_detaching'] = True
self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
self.stubs.Set(self.compute.driver, "instance_exists",
fake_libvirt_driver_instance_exists)
self.stubs.Set(self.compute.driver, "detach_volume",
fake_libvirt_driver_detach_volume_fails)
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'get_by_volume_id')
block_device_obj.BlockDeviceMapping.get_by_volume_id(
self.context, 1).AndReturn(block_device_obj.BlockDeviceMapping(
**fake_bdm))
self.mox.ReplayAll()
self.assertRaises(AttributeError, self.compute.detach_volume,
self.context, 1, instance)
self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
self.assertTrue(called.get('fake_roll_detaching'))
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
admin = context.get_admin_context()
instance = self._create_fake_instance_obj()
volume_id = 'fake'
values = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc',
'delete_on_termination': False,
'volume_id': volume_id,
}
db.block_device_mapping_create(admin, values)
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stubs.Set(cinder.API, "get", fake_volume_get)
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume_id_param):
result["detached"] = volume_id_param == volume_id
self.stubs.Set(cinder.API, "detach", fake_detach)
def fake_terminate_connection(self, context, volume_id, connector):
return {}
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)
# Kill the instance and check that it was detached
bdms = db.block_device_mapping_get_all_by_instance(admin,
instance['uuid'])
self.compute.terminate_instance(admin, self._objectify(instance), bdms,
[])
self.assertTrue(result["detached"])
def test_terminate_deletes_all_bdms(self):
admin = context.get_admin_context()
instance = self._create_fake_instance_obj()
img_bdm = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vda',
'source_type': 'image',
'destination_type': 'local',
'delete_on_termination': False,
'boot_index': 0,
'image_id': 'fake_image'}
vol_bdm = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc',
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': False,
'volume_id': 'fake_vol'}
bdms = []
for bdm in img_bdm, vol_bdm:
bdm_obj = block_device_obj.BlockDeviceMapping(**bdm)
bdm_obj.create(admin)
bdms.append(bdm_obj)
self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
self.compute.run_instance(self.context, instance, {}, {}, None, None,
None, True, None, False)
self.compute.terminate_instance(self.context,
self._objectify(instance), bdms, [])
bdms = db.block_device_mapping_get_all_by_instance(admin,
instance['uuid'])
self.assertEqual(len(bdms), 0)
def test_inject_network_info(self):
instance = self._create_fake_instance(params={'host': CONF.host})
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, None, None,
None, True, None, False)
instance = self.compute_api.get(self.context, instance['uuid'],
want_objects=True)
self.compute_api.inject_network_info(self.context, instance)
self.stubs.Set(self.compute_api.network_api,
'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.delete(self.context, instance)
def test_reset_network(self):
instance = self._create_fake_instance()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, None, None,
None, True, None, False)
instance = self.compute_api.get(self.context, instance['uuid'],
want_objects=True)
self.compute_api.reset_network(self.context, instance)
def test_lock(self):
instance = self._create_fake_instance_obj()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.lock(self.context, instance)
def test_unlock(self):
instance = self._create_fake_instance_obj()
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.unlock(self.context, instance)
def test_get_lock(self):
instance = self._create_fake_instance()
self.assertFalse(self.compute_api.get_lock(self.context, instance))
db.instance_update(self.context, instance['uuid'], {'locked': True})
self.assertTrue(self.compute_api.get_lock(self.context, instance))
def test_add_remove_security_group(self):
instance = self._create_fake_instance()
self.compute.run_instance(self.context,
jsonutils.to_primitive(instance), {}, {}, None, None,
None, True, None, False)
instance = self.compute_api.get(self.context, instance['uuid'])
security_group_name = self._create_group()['name']
self.security_group_api.add_to_instance(self.context,
instance,
security_group_name)
self.security_group_api.remove_from_instance(self.context,
instance,
security_group_name)
def test_get_diagnostics(self):
instance = self._create_fake_instance_obj()
rpcapi = compute_rpcapi.ComputeAPI
self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
rpcapi.get_diagnostics(self.context, instance=instance)
self.mox.ReplayAll()
self.compute_api.get_diagnostics(self.context, instance)
self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.delete(self.context, self._objectify(instance))
def test_secgroup_refresh(self):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
rpcapi = self.security_group_api.security_group_rpcapi
self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
rpcapi.refresh_instance_security_rules(self.context,
instance['host'],
instance)
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1])
def test_secgroup_refresh_once(self):
instance = self._create_fake_instance()
def rule_get(*args, **kwargs):
mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
rpcapi = self.security_group_api.security_group_rpcapi
self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
rpcapi.refresh_instance_security_rules(self.context,
instance['host'],
instance)
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1, 2])
def test_secgroup_refresh_none(self):
def rule_get(*args, **kwargs):
mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
return [mock_rule]
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': []})
return mock_group
self.stubs.Set(
self.compute_api.db,
'security_group_rule_get_by_security_group_grantee',
rule_get)
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
rpcapi = self.security_group_api.security_group_rpcapi
self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
self.mox.ReplayAll()
self.security_group_api.trigger_members_refresh(self.context, [1])
def test_secrule_refresh(self):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
rpcapi = self.security_group_api.security_group_rpcapi
self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
rpcapi.refresh_instance_security_rules(self.context,
instance['host'],
instance)
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1])
def test_secrule_refresh_once(self):
instance = self._create_fake_instance()
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': [instance]})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
rpcapi = self.security_group_api.security_group_rpcapi
self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
rpcapi.refresh_instance_security_rules(self.context,
instance['host'],
instance)
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
def test_secrule_refresh_none(self):
def group_get(*args, **kwargs):
mock_group = db_fakes.FakeModel({'instances': []})
return mock_group
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
rpcapi = self.security_group_api.security_group_rpcapi
self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
self.mox.ReplayAll()
self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
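# The six security-group refresh tests above share one shape: stub the DB
# lookups so the security group either does or does not contain instances,
# record the expected refresh_instance_security_rules calls on the mocked
# rpcapi, and call trigger_members_refresh or trigger_rules_refresh. The
# "_once" variants pass two group ids but still expect a single refresh
# because the same instance backs both groups, while the "_none" variants
# replay the mock with no expectations, so any refresh RPC would fail.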
def test_live_migrate(self):
instance, instance_uuid = self._run_instance()
instance = self._objectify(instance)
rpcapi = self.compute_api.compute_task_api
self.mox.StubOutWithMock(rpcapi, 'live_migrate_instance')
rpcapi.live_migrate_instance(self.context, instance, 'fake_dest_host',
block_migration=True,
disk_over_commit=True)
self.mox.ReplayAll()
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
host_name='fake_dest_host')
instance.refresh()
self.assertEqual(instance['task_state'], task_states.MIGRATING)
def test_evacuate(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
services=True))
instance_uuid = instance['uuid']
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertIsNone(instance['task_state'])
def fake_service_is_up(*args, **kwargs):
return False
def fake_rebuild_instance(*args, **kwargs):
self.assertIn('info_cache', kwargs['instance'])
db.instance_update(self.context, instance_uuid,
{'host': kwargs['host']})
self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
fake_service_is_up)
self.stubs.Set(self.compute_api.compute_rpcapi, 'rebuild_instance',
fake_rebuild_instance)
self.compute_api.evacuate(self.context.elevated(),
instance,
host='fake_dest_host',
on_shared_storage=True,
admin_password=None)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.REBUILDING)
self.assertEqual(instance['host'], 'fake_dest_host')
db.instance_destroy(self.context, instance['uuid'])
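# Together with the two tests below: evacuate is only permitted when the
# source host's service is reported as down (fake_service_is_up returns
# False here); an unknown source host raises ComputeHostNotFound and a host
# whose service is still up raises ComputeServiceInUse.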
def test_fail_evacuate_from_non_existing_host(self):
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['launched_at'] = timeutils.utcnow()
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
inst['node'] = NODENAME
type_id = flavors.get_flavor_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
instance = jsonutils.to_primitive(db.instance_create(self.context,
inst))
instance_uuid = instance['uuid']
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertIsNone(instance['task_state'])
self.assertRaises(exception.ComputeHostNotFound,
self.compute_api.evacuate, self.context.elevated(), instance,
host='fake_dest_host', on_shared_storage=True,
admin_password=None)
db.instance_destroy(self.context, instance['uuid'])
def test_fail_evacuate_from_running_host(self):
instance = jsonutils.to_primitive(self._create_fake_instance(
services=True))
instance_uuid = instance['uuid']
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertIsNone(instance['task_state'])
def fake_service_is_up(*args, **kwargs):
return True
self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
fake_service_is_up)
self.assertRaises(exception.ComputeServiceInUse,
self.compute_api.evacuate, self.context.elevated(), instance,
host='fake_dest_host', on_shared_storage=True,
admin_password=None)
db.instance_destroy(self.context, instance['uuid'])
def test_fail_evacuate_instance_in_wrong_state(self):
instances = [
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.BUILDING})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.PAUSED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.SUSPENDED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.RESCUED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.RESIZED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.SOFT_DELETED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.DELETED})),
jsonutils.to_primitive(self._create_fake_instance(
{'vm_state': vm_states.ERROR}))
]
for instance in instances:
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.evacuate, self.context, instance,
host='fake_dest_host', on_shared_storage=True,
admin_password=None)
db.instance_destroy(self.context, instance['uuid'])
def test_get_migrations(self):
migration = test_migration.fake_db_migration(uuid="1234")
filters = {'host': 'host1'}
self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
db.migration_get_all_by_filters(self.context,
filters).AndReturn([migration])
self.mox.ReplayAll()
migrations = self.compute_api.get_migrations(self.context,
filters)
self.assertEqual(1, len(migrations))
self.assertEqual(migrations[0].id, migration['id'])
def fake_rpc_method(context, method, **kwargs):
pass
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in values.iteritems():
for host in hosts:
db.service_create(context,
{'host': host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
return values
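# NOTE: illustrative sketch only, not part of the nova test API. The
# aggregate tests below repeatedly pick "the first zone and its first host"
# out of the mapping returned by _create_service_entries(); the hypothetical
# helper below just makes that access pattern explicit.
def _example_pick_zone_and_host(values=None):
    # Same shape as the default argument of _create_service_entries().
    values = values or {'avail_zone1': ['fake_host1', 'fake_host2'],
                        'avail_zone2': ['fake_host3']}
    zone = list(values.keys())[0]
    host = values[zone][0]
    return zone, host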
class ComputeAPIAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.api.
"""
def setUp(self):
super(ComputeAPIAggrTestCase, self).setUp()
self.api = compute_api.AggregateAPI()
self.context = context.get_admin_context()
self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method)
self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method)
def test_aggregate_no_zone(self):
# Ensure we can create an aggregate without an availability zone
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
None)
self.api.delete_aggregate(self.context, aggr['id'])
db.aggregate_get(self.context.elevated(read_deleted='yes'),
aggr['id'])
self.assertRaises(exception.AggregateNotFound,
self.api.delete_aggregate, self.context, aggr['id'])
def test_check_az_for_aggregate(self):
# Ensure hosts with conflicting availability zones are detected.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host1 = values[fake_zone][0]
fake_host2 = values[fake_zone][1]
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
fake_zone, fake_host1)
aggr1 = self._init_aggregate_with_host(aggr1, None, None, fake_host2)
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
fake_host2)
aggr2 = self._init_aggregate_with_host(aggr2, None, None, fake_host1)
metadata = {'availability_zone': 'another_zone'}
self.assertRaises(exception.InvalidAggregateAction,
self.api.update_aggregate,
self.context, aggr2['id'], metadata)
def test_update_aggregate(self):
# Ensure aggregate properties can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
fake_notifier.NOTIFICATIONS = []
aggr = self.api.update_aggregate(self.context, aggr['id'],
{'name': 'new_fake_aggregate'})
self.assertIsNone(availability_zones._get_cache().get('cache'))
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updateprop.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updateprop.end')
def test_update_aggregate_no_az(self):
# Ensure metadata that does not include an availability zone can be
# updated, even when the aggregate contains hosts that belong to
# another availability zone.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
fake_zone, fake_host)
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
fake_host)
metadata = {'name': 'new_fake_aggregate'}
fake_notifier.NOTIFICATIONS = []
aggr2 = self.api.update_aggregate(self.context, aggr2['id'],
metadata)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updateprop.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updateprop.end')
def test_update_aggregate_az_change(self):
# Ensure the availability zone can be updated when the aggregate is
# the only one with an availability zone.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
fake_zone, fake_host)
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
fake_host)
metadata = {'availability_zone': 'new_fake_zone'}
fake_notifier.NOTIFICATIONS = []
aggr1 = self.api.update_aggregate(self.context, aggr1['id'],
metadata)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updateprop.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updateprop.end')
def test_update_aggregate_az_fails(self):
# Ensure an aggregate's availability zone can't be updated when the
# aggregate has hosts in another availability zone.
fake_notifier.NOTIFICATIONS = []
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
fake_zone, fake_host)
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
fake_host)
metadata = {'availability_zone': 'another_zone'}
self.assertRaises(exception.InvalidAggregateAction,
self.api.update_aggregate,
self.context, aggr2['id'], metadata)
fake_host2 = values[fake_zone][1]
aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
None, fake_host2)
metadata = {'availability_zone': fake_zone}
aggr3 = self.api.update_aggregate(self.context, aggr3['id'],
metadata)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
msg = fake_notifier.NOTIFICATIONS[13]
self.assertEqual(msg.event_type,
'aggregate.updateprop.start')
msg = fake_notifier.NOTIFICATIONS[14]
self.assertEqual(msg.event_type,
'aggregate.updateprop.end')
def test_update_aggregate_metadata(self):
# Ensure metadata can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
'foo_key2': 'foo_value2',
'availability_zone': 'fake_zone'}
fake_notifier.NOTIFICATIONS = []
availability_zones._get_cache().add('fake_key', 'fake_value')
aggr = self.api.update_aggregate_metadata(self.context, aggr['id'],
metadata)
self.assertIsNone(availability_zones._get_cache().get('fake_key'))
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.end')
metadata['foo_key1'] = None
expected = self.api.update_aggregate_metadata(self.context,
aggr['id'], metadata)
self.assertThat(expected['metadata'],
matchers.DictMatches({'availability_zone': 'fake_zone',
'foo_key2': 'foo_value2'}))
def test_update_aggregate_metadata_no_az(self):
# Ensure metadata that does not include an availability zone can be
# updated, even when the aggregate contains hosts that belong to
# another availability zone.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
fake_zone, fake_host)
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
fake_host)
metadata = {'foo_key2': 'foo_value3'}
fake_notifier.NOTIFICATIONS = []
aggr2 = self.api.update_aggregate_metadata(self.context, aggr2['id'],
metadata)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.end')
self.assertThat(aggr2['metadata'],
matchers.DictMatches({'foo_key2': 'foo_value3'}))
def test_update_aggregate_metadata_az_change(self):
# Ensure the availability zone can be updated when the aggregate is
# the only one with an availability zone.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
fake_zone, fake_host)
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
fake_host)
metadata = {'availability_zone': 'new_fake_zone'}
fake_notifier.NOTIFICATIONS = []
aggr1 = self.api.update_aggregate_metadata(self.context,
aggr1['id'], metadata)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.end')
def test_update_aggregate_metadata_az_fails(self):
# Ensure an aggregate's availability zone can't be updated when the
# aggregate has hosts in another availability zone.
fake_notifier.NOTIFICATIONS = []
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
fake_zone, fake_host)
aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
fake_host)
metadata = {'availability_zone': 'another_zone'}
self.assertRaises(exception.InvalidAggregateAction,
self.api.update_aggregate_metadata,
self.context, aggr2['id'], metadata)
fake_host2 = values[fake_zone][1]
aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
None, fake_host)
metadata = {'availability_zone': fake_zone}
aggr3 = self.api.update_aggregate_metadata(self.context,
aggr3['id'],
metadata)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
msg = fake_notifier.NOTIFICATIONS[13]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.start')
msg = fake_notifier.NOTIFICATIONS[14]
self.assertEqual(msg.event_type,
'aggregate.updatemetadata.end')
def test_delete_aggregate(self):
# Ensure we can delete an aggregate.
fake_notifier.NOTIFICATIONS = []
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.create.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.create.end')
fake_notifier.NOTIFICATIONS = []
self.api.delete_aggregate(self.context, aggr['id'])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.delete.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.delete.end')
db.aggregate_get(self.context.elevated(read_deleted='yes'),
aggr['id'])
self.assertRaises(exception.AggregateNotFound,
self.api.delete_aggregate, self.context, aggr['id'])
def test_delete_non_empty_aggregate(self):
# Ensure InvalidAggregateAction is raised when deleting a non-empty aggregate.
_create_service_entries(self.context,
{'fake_availability_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_availability_zone')
self.api.add_host_to_aggregate(self.context, aggr['id'], 'fake_host')
self.assertRaises(exception.InvalidAggregateAction,
self.api.delete_aggregate, self.context, aggr['id'])
def test_add_host_to_aggregate(self):
# Ensure we can add a host to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
def fake_add_aggregate_host(*args, **kwargs):
hosts = kwargs["aggregate"]["hosts"]
self.assertIn(fake_host, hosts)
self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host',
fake_add_aggregate_host)
self.mox.StubOutWithMock(availability_zones,
'update_host_availability_zone_cache')
availability_zones.update_host_availability_zone_cache(self.context,
fake_host)
self.mox.ReplayAll()
fake_notifier.NOTIFICATIONS = []
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], fake_host)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.addhost.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.addhost.end')
self.assertEqual(len(aggr['hosts']), 1)
def test_add_host_to_aggr_with_no_az(self):
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
fake_host)
aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2',
None)
aggr_no_az = self.api.add_host_to_aggregate(self.context,
aggr_no_az['id'],
fake_host)
self.assertIn(fake_host, aggr['hosts'])
self.assertIn(fake_host, aggr_no_az['hosts'])
def test_add_host_no_az_metadata(self):
# NOTE(mtreinish): based on how create works, this is not how the
# metadata is supposed to end up in the database, but it has been
# seen. See lp bug #1209007. This test just confirms that the host
# is still added to the aggregate if there is no availability zone
# metadata.
def fake_aggregate_metadata_get_by_metadata_key(*args, **kwargs):
return {'meta_key': 'fake_value'}
self.stubs.Set(self.compute.db,
'aggregate_metadata_get_by_metadata_key',
fake_aggregate_metadata_get_by_metadata_key)
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
fake_zone)
aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
fake_host)
self.assertIn(fake_host, aggr['hosts'])
def test_add_host_to_multi_az(self):
# Ensure we can't add a host to a different availability zone.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], fake_host)
self.assertEqual(len(aggr['hosts']), 1)
fake_zone2 = "another_zone"
aggr2 = self.api.create_aggregate(self.context,
'fake_aggregate2', fake_zone2)
self.assertRaises(exception.InvalidAggregateAction,
self.api.add_host_to_aggregate,
self.context, aggr2['id'], fake_host)
def test_add_host_to_aggregate_multiple(self):
# Ensure we can add multiple hosts to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
self.assertEqual(len(aggr['hosts']), len(values[fake_zone]))
def test_add_host_to_aggregate_raise_not_found(self):
# Ensure ComputeHostNotFound is raised when adding invalid host.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
fake_notifier.NOTIFICATIONS = []
self.assertRaises(exception.ComputeHostNotFound,
self.api.add_host_to_aggregate,
self.context, aggr['id'], 'invalid_host')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id,
'compute.fake-mini')
def test_remove_host_from_aggregate_active(self):
# Ensure we can remove a host from an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
host_to_remove = values[fake_zone][0]
def fake_remove_aggregate_host(*args, **kwargs):
hosts = kwargs["aggregate"]["hosts"]
self.assertNotIn(host_to_remove, hosts)
self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host',
fake_remove_aggregate_host)
self.mox.StubOutWithMock(availability_zones,
'update_host_availability_zone_cache')
availability_zones.update_host_availability_zone_cache(self.context,
host_to_remove)
self.mox.ReplayAll()
fake_notifier.NOTIFICATIONS = []
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
host_to_remove)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg.event_type,
'aggregate.removehost.start')
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg.event_type,
'aggregate.removehost.end')
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
def test_remove_host_from_aggregate_raise_not_found(self):
# Ensure ComputeHostNotFound is raised when removing invalid host.
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
self.api.remove_host_from_aggregate,
self.context, aggr['id'], 'invalid_host')
def test_aggregate_list(self):
aggregate = self.api.create_aggregate(self.context,
'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
'foo_key2': 'foo_value2'}
meta_aggregate = self.api.create_aggregate(self.context,
'fake_aggregate2',
'fake_zone2')
self.api.update_aggregate_metadata(self.context, meta_aggregate['id'],
metadata)
aggregate_list = self.api.get_aggregate_list(self.context)
self.assertIn(aggregate['id'],
map(lambda x: x['id'], aggregate_list))
self.assertIn(meta_aggregate['id'],
map(lambda x: x['id'], aggregate_list))
self.assertIn('fake_aggregate',
map(lambda x: x['name'], aggregate_list))
self.assertIn('fake_aggregate2',
map(lambda x: x['name'], aggregate_list))
self.assertIn('fake_zone',
map(lambda x: x['availability_zone'], aggregate_list))
self.assertIn('fake_zone2',
map(lambda x: x['availability_zone'], aggregate_list))
test_meta_aggregate = aggregate_list[1]
self.assertIn('foo_key1', test_meta_aggregate.get('metadata'))
self.assertIn('foo_key2', test_meta_aggregate.get('metadata'))
self.assertEqual('foo_value1',
test_meta_aggregate.get('metadata')['foo_key1'])
self.assertEqual('foo_value2',
test_meta_aggregate.get('metadata')['foo_key2'])
def test_aggregate_list_with_hosts(self):
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
host_aggregate = self.api.create_aggregate(self.context,
'fake_aggregate',
fake_zone)
self.api.add_host_to_aggregate(self.context, host_aggregate['id'],
values[fake_zone][0])
aggregate_list = self.api.get_aggregate_list(self.context)
aggregate = aggregate_list[0]
self.assertIn(values[fake_zone][0], aggregate.get('hosts'))
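# NOTE: standalone sketch, not nova code. The notification assertions in
# ComputeAPIAggrTestCase above all check the same pairing convention: each
# aggregate API call is expected to push exactly one '<action>.start'
# followed by one '<action>.end' event onto fake_notifier.NOTIFICATIONS.
# The hypothetical helper below restates that convention with plain dicts;
# the real messages are objects exposing an event_type attribute.
def _example_check_start_end_pair(notifications, action):
    assert len(notifications) == 2
    assert notifications[0]['event_type'] == 'aggregate.%s.start' % action
    assert notifications[1]['event_type'] == 'aggregate.%s.end' % action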
class ComputeAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.manager.
"""
def setUp(self):
super(ComputeAggrTestCase, self).setUp()
self.context = context.get_admin_context()
values = {'name': 'test_aggr'}
az = {'availability_zone': 'test_zone'}
self.aggr = db.aggregate_create(self.context, values, metadata=az)
def test_add_aggregate_host(self):
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
fake_driver_add_to_aggregate.called = True
return {"foo": "bar"}
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="host",
aggregate=jsonutils.to_primitive(self.aggr), slave_info=None)
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
def fake_driver_remove_from_aggregate(context, aggregate, host,
**_ignore):
fake_driver_remove_from_aggregate.called = True
self.assertEqual("host", host)
return {"foo": "bar"}
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
fake_driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="host",
slave_info=None)
self.assertTrue(fake_driver_remove_from_aggregate.called)
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "add_to_aggregate",
driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="the_host",
slave_info="SLAVE_INFO",
aggregate=jsonutils.to_primitive(self.aggr))
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
slave_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
def setUp(self):
super(ComputePolicyTestCase, self).setUp()
self.compute_api = compute.API()
def test_actions_are_prefixed(self):
self.mox.StubOutWithMock(policy, 'enforce')
nova.policy.enforce(self.context, 'compute:reboot', {})
self.mox.ReplayAll()
compute_api.check_policy(self.context, 'reboot', {})
def test_wrapped_method(self):
instance = self._create_fake_instance(params={'host': None,
'cell_name': 'foo'})
# force delete to fail
rules = {"compute:delete": [["false:false"]]}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.delete, self.context, instance)
# reset rules to allow deletion
rules = {"compute:delete": []}
self.policy.set_rules(rules)
self.compute_api.delete(self.context, self._objectify(instance))
def test_create_fail(self):
rules = {"compute:create": [["false:false"]]}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1')
def test_create_attach_volume_fail(self):
rules = {
"compute:create": [],
"compute:create:attach_network": [["false:false"]],
"compute:create:attach_volume": [],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
requested_networks='blah',
block_device_mapping='blah')
def test_create_attach_network_fail(self):
rules = {
"compute:create": [],
"compute:create:attach_network": [],
"compute:create:attach_volume": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, '1', '1',
requested_networks='blah',
block_device_mapping='blah')
def test_get_fail(self):
instance = self._create_fake_instance()
rules = {
"compute:get": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get, self.context, instance['uuid'])
def test_get_all_fail(self):
rules = {
"compute:get_all": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_all, self.context)
def test_get_instance_faults(self):
instance1 = self._create_fake_instance()
instance2 = self._create_fake_instance()
instances = [instance1, instance2]
rules = {
"compute:get_instance_faults": [["false:false"]],
}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_instance_faults,
context.get_admin_context(), instances)
def test_force_host_fail(self):
rules = {"compute:create": [],
"compute:create:forced_host": [["role:fake"]],
"network:validate_networks": []}
self.policy.set_rules(rules)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, None, '1',
availability_zone='1:1')
def test_force_host_pass(self):
rules = {"compute:create": [],
"compute:create:forced_host": [],
"network:validate_networks": []}
self.policy.set_rules(rules)
self.compute_api.create(self.context, None, '1',
availability_zone='1:1')
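# NOTE: illustrative only, inferred from how ComputePolicyTestCase uses the
# rule dictionaries above: an empty list allows the action for everyone,
# while a rule such as [["false:false"]] can never match and makes the
# corresponding compute API call raise exception.PolicyNotAuthorized. A
# hypothetical rule set combining the patterns seen above:
_EXAMPLE_POLICY_RULES = {
    "compute:create": [],                            # allowed for everyone
    "compute:create:forced_host": [["role:fake"]],   # requires the 'fake' role
    "compute:delete": [["false:false"]],             # effectively denied
}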
class DisabledInstanceTypesTestCase(BaseTestCase):
"""Some instance-types are marked 'disabled' which means that they will not
show up in customer-facing listings. We do, however, want those
instance-types to be available for emergency migrations and for rebuilding
of existing instances.
One legitimate use of the 'disabled' field would be when phasing out a
particular instance-type. We still want customers to be able to use an
instance of the old type, and we want Ops to be able to perform
migrations against it, but we *don't* want customers building new slices
with this phased-out instance-type.
"""
def setUp(self):
super(DisabledInstanceTypesTestCase, self).setUp()
self.compute_api = compute.API()
self.inst_type = flavors.get_default_flavor()
def test_can_build_instance_from_visible_instance_type(self):
self.inst_type['disabled'] = False
# Assert that exception.FlavorNotFound is not raised
self.compute_api.create(self.context, self.inst_type,
image_href='some-fake-image')
def test_cannot_build_instance_from_disabled_instance_type(self):
self.inst_type['disabled'] = True
self.assertRaises(exception.FlavorNotFound,
self.compute_api.create, self.context, self.inst_type, None)
def test_can_resize_to_visible_instance_type(self):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id =\
flavors.get_flavor_by_flavor_id
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
instance_type = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
instance_type['disabled'] = False
return instance_type
self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
self._stub_migrate_server()
self.compute_api.resize(self.context, instance, '4')
def test_cannot_resize_to_disabled_instance_type(self):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id = \
flavors.get_flavor_by_flavor_id
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
instance_type = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
instance_type['disabled'] = True
return instance_type
self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context, instance, '4')
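# NOTE: illustrative sketch only. Both resize tests above use the same
# "wrap the real lookup and force one field" stubbing pattern for
# flavors.get_flavor_by_flavor_id; the hypothetical factory below restates
# that pattern in one place.
def _example_make_flavor_stub(real_lookup, disabled):
    def _stub(flavor_id, ctxt=None, read_deleted="yes"):
        flavor = real_lookup(flavor_id, ctxt, read_deleted)
        flavor['disabled'] = disabled
        return flavor
    return _stub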
class ComputeReschedulingTestCase(BaseTestCase):
"""Tests re-scheduling logic for new build requests."""
def setUp(self):
super(ComputeReschedulingTestCase, self).setUp()
self.expected_task_state = task_states.SCHEDULING
def fake_update(*args, **kwargs):
self.updated_task_state = kwargs.get('task_state')
self.stubs.Set(self.compute, '_instance_update', fake_update)
def _reschedule(self, request_spec=None, filter_properties=None,
exc_info=None):
if not filter_properties:
filter_properties = {}
instance_uuid = "12-34-56-78-90"
admin_password = None
injected_files = None
requested_networks = None
is_first_time = False
scheduler_method = self.compute.scheduler_rpcapi.run_instance
method_args = (request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties)
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
method_args, self.expected_task_state, exc_info=exc_info)
def test_reschedule_no_filter_properties(self):
# no filter_properties will disable re-scheduling.
self.assertFalse(self._reschedule())
def test_reschedule_no_retry_info(self):
# no retry info will also disable re-scheduling.
filter_properties = {}
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_no_request_spec(self):
# no request spec will also disable re-scheduling.
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_success(self):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
request_spec = {'instance_uuids': ['foo', 'bar']}
try:
raise test.TestingException("just need an exception")
except test.TestingException:
exc_info = sys.exc_info()
exc_str = traceback.format_exception(*exc_info)
self.assertTrue(self._reschedule(filter_properties=filter_properties,
request_spec=request_spec, exc_info=exc_info))
self.assertEqual(1, len(request_spec['instance_uuids']))
self.assertEqual(self.updated_task_state, self.expected_task_state)
self.assertEqual(exc_str, filter_properties['retry']['exc'])
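# NOTE: inferred from the assertions in ComputeReschedulingTestCase above:
# rescheduling is driven entirely by the 'retry' entry of filter_properties.
# A successful _reschedule records the formatted traceback of the build
# failure under 'exc' and consumes one entry from
# request_spec['instance_uuids']; a missing retry dict or request spec
# disables rescheduling altogether. A hypothetical example value:
_EXAMPLE_FILTER_PROPERTIES = {
    'retry': {
        'num_attempts': 1,
        # 'exc' is filled in with traceback.format_exception(...) output
        # once a failed build has been rescheduled.
    },
}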
class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
"""Test re-scheduling logic for prep_resize requests."""
def setUp(self):
super(ComputeReschedulingResizeTestCase, self).setUp()
self.expected_task_state = task_states.RESIZE_PREP
def _reschedule(self, request_spec=None, filter_properties=None,
exc_info=None):
if not filter_properties:
filter_properties = {}
instance_uuid = "12-34-56-78-90"
instance = fake_instance.fake_db_instance(uuid=instance_uuid)
instance = self._objectify(instance)
instance_type = {}
image = None
reservations = None
scheduler_method = self.compute.scheduler_rpcapi.prep_resize
method_args = (instance, instance_type, image, request_spec,
filter_properties, reservations)
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
method_args, self.expected_task_state, exc_info=exc_info)
class InnerTestingException(Exception):
pass
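# InnerTestingException is deliberately distinct from test.TestingException so
# the tests below can tell a secondary failure (raised while shutting down or
# rescheduling) apart from the original build error they start with.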
class ComputeRescheduleOrErrorTestCase(BaseTestCase):
"""Test logic and exception handling around rescheduling or re-raising
original exceptions when builds fail.
"""
def setUp(self):
super(ComputeRescheduleOrErrorTestCase, self).setUp()
self.instance = self._create_fake_instance()
def test_reschedule_or_error_called(self):
"""Basic sanity check to make sure _reschedule_or_error is called
when a build fails.
"""
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(self.compute, '_spawn')
self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
bdms = block_device_obj.block_device_make_list(self.context, [])
block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
mox.IgnoreArg(), self.instance.uuid).AndReturn(bdms)
self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
[], mox.IgnoreArg(), [], None, set_access_ip=False).AndRaise(
test.TestingException("BuildError"))
self.compute._reschedule_or_error(mox.IgnoreArg(), self.instance,
mox.IgnoreArg(), None, None, None,
False, None, {}, bdms, False).AndReturn(True)
self.mox.ReplayAll()
self.compute._run_instance(self.context, None, {}, None, None, None,
False, None, self.instance, False)
def test_shutdown_instance_fail(self):
"""Test shutdown instance failing before re-scheduling logic can even
run.
"""
instance_uuid = self.instance['uuid']
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
# should raise the deallocation exception, not the original build
# error:
self.assertRaises(InnerTestingException,
self.compute._reschedule_or_error, self.context,
self.instance, exc_info, None, None, None, False, None, {})
def test_shutdown_instance_fail_instance_info_cache_not_found(self):
# Covers the case that _shutdown_instance fails with an
# InstanceInfoCacheNotFound exception when getting instance network
# information prior to calling driver.destroy.
elevated_context = self.context.elevated()
error = exception.InstanceInfoCacheNotFound(
instance_uuid=self.instance['uuid'])
with contextlib.nested(
mock.patch.object(self.context, 'elevated',
return_value=elevated_context),
mock.patch.object(self.compute, '_get_instance_nw_info',
side_effect=error),
mock.patch.object(self.compute,
'_get_instance_volume_block_device_info'),
mock.patch.object(self.compute.driver, 'destroy'),
mock.patch.object(self.compute, '_try_deallocate_network')
) as (
elevated_mock,
_get_instance_nw_info_mock,
_get_instance_volume_block_device_info_mock,
destroy_mock,
_try_deallocate_network_mock
):
self.compute._shutdown_instance(self.context, self.instance,
bdms=[], notify=False)
# By asserting that _try_deallocate_network_mock was called
# exactly once, we know that _get_instance_nw_info raising
# InstanceInfoCacheNotFound did not make _shutdown_instance error
# out and driver.destroy was still called.
_try_deallocate_network_mock.assert_called_once_with(
elevated_context, self.instance, None)
def test_reschedule_fail(self):
# Test handling of exception from _reschedule.
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, instance_uuid,
{}, self.compute.scheduler_rpcapi.run_instance,
method_args, task_states.SCHEDULING, exc_info).AndRaise(
InnerTestingException("Inner"))
self.mox.ReplayAll()
self.assertFalse(self.compute._reschedule_or_error(self.context,
self.instance, exc_info, None, None, None, False, None, {}))
def test_reschedule_false(self):
# Test not-rescheduling, but no nested exception.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
try:
raise test.TestingException("Original")
except test.TestingException:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,
self.compute.scheduler_rpcapi.run_instance, method_args,
task_states.SCHEDULING, exc_info).AndReturn(False)
self.mox.ReplayAll()
# re-scheduling is False, the original build error should be
# raised here:
self.assertFalse(self.compute._reschedule_or_error(self.context,
self.instance, exc_info, None, None, None, False, None, {}))
def test_reschedule_true(self):
# Test behavior when re-scheduling happens.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_reschedule')
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._shutdown_instance(self.context, self.instance,
mox.IgnoreArg(),
mox.IgnoreArg())
self.compute._cleanup_volumes(self.context, instance_uuid,
mox.IgnoreArg())
self.compute._reschedule(self.context, None, {}, instance_uuid,
self.compute.scheduler_rpcapi.run_instance,
method_args, task_states.SCHEDULING, exc_info).AndReturn(
True)
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
# re-scheduling is True, original error is logged, but nothing
# is raised:
self.compute._reschedule_or_error(self.context, self.instance,
exc_info, None, None, None, False, None, {})
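# Taken together, the tests above pin down _reschedule_or_error's contract:
# a failure while shutting the instance down propagates, a failure inside
# _reschedule itself is swallowed and the method returns False, a False
# return means the caller should re-raise the original build error, and a
# True return means the original error is only logged via
# _log_original_error.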
def test_no_reschedule_on_delete_during_spawn(self):
# instance should not be rescheduled if instance is deleted
# during the build
self.mox.StubOutWithMock(self.compute, '_spawn')
self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
exc = exception.UnexpectedDeletingTaskStateError(
expected=task_states.SPAWNING, actual=task_states.DELETING)
self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
self.mox.ReplayAll()
# test succeeds if mocked method '_reschedule_or_error' is not
# called.
self.compute._run_instance(self.context, None, {}, None, None, None,
False, None, self.instance, False)
def test_no_reschedule_on_unexpected_task_state(self):
# instance shouldn't be rescheduled if unexpected task state arises.
# the exception should get reraised.
self.mox.StubOutWithMock(self.compute, '_spawn')
self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
actual=task_states.SCHEDULING)
self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
self.mox.ReplayAll()
self.assertRaises(exception.UnexpectedTaskStateError,
self.compute._run_instance, self.context, None, {}, None, None,
None, False, None, self.instance, False)
def test_no_reschedule_on_block_device_fail(self):
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
exc = exception.InvalidBDM()
self.compute._prep_block_device(mox.IgnoreArg(), self.instance,
mox.IgnoreArg()).AndRaise(exc)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidBDM, self.compute._run_instance,
self.context, None, {}, None, None, None, False,
None, self.instance, False)
class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
"""Test logic and exception handling around rescheduling prep resize
requests
"""
def setUp(self):
super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
self.instance = self._create_fake_instance()
self.instance_uuid = self.instance['uuid']
self.instance_type = flavors.get_flavor_by_name(
"m1.tiny")
def test_reschedule_resize_or_reraise_called(self):
"""Verify the rescheduling logic gets called when there is an error
during prep_resize.
"""
inst_obj = self._create_fake_instance_obj()
self.mox.StubOutWithMock(self.compute.db, 'migration_create')
self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise')
self.compute.db.migration_create(mox.IgnoreArg(),
mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
inst_obj, mox.IgnoreArg(), self.instance_type,
mox.IgnoreArg(), {},
{})
self.mox.ReplayAll()
self.compute.prep_resize(self.context, image=None,
instance=inst_obj,
instance_type=self.instance_type,
reservations=[], request_spec={},
filter_properties={}, node=None)
def test_reschedule_fails_with_exception(self):
"""Original exception should be raised if the _reschedule method
raises another exception
"""
instance = self._create_fake_instance_obj()
method_args = (None, instance, self.instance_type, None, None,
None)
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.compute._reschedule(
self.context, None, None, instance.uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
task_states.RESIZE_PREP).AndRaise(
InnerTestingException("Inner"))
self.mox.ReplayAll()
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
None, instance, exc_info, self.instance_type,
self.none_quotas, {}, {})
def test_reschedule_false(self):
"""Original exception should be raised if the resize is not
rescheduled.
"""
instance = self._create_fake_instance_obj()
method_args = (None, instance, self.instance_type, None, None, None)
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.compute._reschedule(
self.context, None, None, instance.uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
task_states.RESIZE_PREP).AndReturn(False)
self.mox.ReplayAll()
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
None, instance, exc_info, self.instance_type,
self.none_quotas, {}, {})
def test_reschedule_true(self):
# If rescheduled, the original resize exception should be logged.
instance = self._create_fake_instance_obj()
instance_p = obj_base.obj_to_primitive(instance)
method_args = (instance_p, self.instance_type, None, {}, {}, None)
try:
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.mox.StubOutWithMock(self.compute, "_log_original_error")
self.compute._reschedule(self.context, {}, {},
instance.uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
task_states.RESIZE_PREP, exc_info).AndReturn(True)
self.compute._log_original_error(exc_info, instance.uuid)
self.mox.ReplayAll()
self.compute._reschedule_resize_or_reraise(
self.context, None, instance, exc_info,
self.instance_type, self.none_quotas, {}, {})
class ComputeInactiveImageTestCase(BaseTestCase):
def setUp(self):
super(ComputeInactiveImageTestCase, self).setUp()
def fake_show(meh, context, id):
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'deleted',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.compute_api = compute.API()
def test_create_instance_with_deleted_image(self):
# Make sure we can't start an instance with a deleted image.
inst_type = flavors.get_flavor_by_name('m1.tiny')
self.assertRaises(exception.ImageNotActive,
self.compute_api.create,
self.context, inst_type, 'fake-image-uuid')
class EvacuateHostTestCase(BaseTestCase):
def setUp(self):
super(EvacuateHostTestCase, self).setUp()
self.inst_ref = jsonutils.to_primitive(self._create_fake_instance
({'host': 'fake_host_2',
'node': 'fakenode2'}))
db.instance_update(self.context, self.inst_ref['uuid'],
{"task_state": task_states.REBUILDING})
def tearDown(self):
db.instance_destroy(self.context, self.inst_ref['uuid'])
super(EvacuateHostTestCase, self).tearDown()
def _rebuild(self, on_shared_storage=True):
def fake(cls, ctxt, instance, *args, **kwargs):
pass
self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
orig_image_ref = None
image_ref = None
injected_files = None
bdms = db.block_device_mapping_get_all_by_instance(self.context,
self.inst_ref['uuid'])
self.compute.rebuild_instance(
self.context, self._objectify(self.inst_ref), orig_image_ref,
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage)
def test_rebuild_on_host_updated_target(self):
"""Confirm evacuate scenario updates host and node."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
def fake_get_compute_info(context, host):
self.assertTrue(context.is_admin)
self.assertEqual('fake-mini', host)
cn = compute_node_obj.ComputeNode(
hypervisor_hostname=self.rt.nodename)
return cn
self.stubs.Set(self.compute, '_get_compute_info',
fake_get_compute_info)
self.mox.ReplayAll()
self._rebuild()
# Should be on destination host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], self.compute.host)
self.assertEqual(NODENAME, instance['node'])
def test_rebuild_on_host_updated_target_node_not_found(self):
"""Confirm evacuate scenario where compute_node isn't found."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
def fake_get_compute_info(context, host):
raise exception.NotFound(_("Host %s not found") % host)
self.stubs.Set(self.compute, '_get_compute_info',
fake_get_compute_info)
self.mox.ReplayAll()
self._rebuild()
# Should be on destination host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], self.compute.host)
self.assertIsNone(instance['node'])
def test_rebuild_with_instance_in_stopped_state(self):
"""Confirm evacuate scenario updates vm_state to stopped
if instance is in stopped state
"""
# Initialize the VM to stopped state
db.instance_update(self.context, self.inst_ref['uuid'],
{"vm_state": vm_states.STOPPED})
self.inst_ref['vm_state'] = vm_states.STOPPED
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self._rebuild()
# Check the vm state is reset to stopped
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
def test_rebuild_with_wrong_shared_storage(self):
"""Confirm evacuate scenario does not update host."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
lambda: self._rebuild(on_shared_storage=False))
# Should remain on original host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], 'fake_host_2')
def test_rebuild_on_host_with_volumes(self):
"""Confirm evacuate scenario reconnects volumes."""
values = {'instance_uuid': self.inst_ref['uuid'],
'source_type': 'volume',
'device_name': '/dev/vdc',
'delete_on_termination': False,
'volume_id': 'fake_volume_id'}
db.block_device_mapping_create(self.context, values)
def fake_volume_get(self, context, volume):
return {'id': 'fake_volume_id'}
self.stubs.Set(cinder.API, "get", fake_volume_get)
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume):
result["detached"] = volume["id"] == 'fake_volume_id'
self.stubs.Set(cinder.API, "detach", fake_detach)
def fake_terminate_connection(self, context, volume, connector):
return {}
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)
# make sure volumes attach, detach are called
self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.compute._prep_block_device(mox.IsA(self.context),
mox.IsA(instance_obj.Instance),
mox.IgnoreArg())
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self._rebuild()
# cleanup
for bdms in db.block_device_mapping_get_all_by_instance(
self.context, self.inst_ref['uuid']):
db.block_device_mapping_destroy(self.context, bdms['id'])
def test_rebuild_on_host_with_shared_storage(self):
"""Confirm evacuate scenario on shared storage."""
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
mox.IsA(instance_obj.Instance), {}, mox.IgnoreArg(), 'newpass',
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()
self._rebuild()
def test_rebuild_on_host_without_shared_storage(self):
"""Confirm evacuate scenario without shared storage
(rebuild from image)
"""
fake_image = {'id': 1,
'name': 'fake_name',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id'}}
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
mox.IsA(instance_obj.Instance), mox.IsA(fake_image),
mox.IgnoreArg(), mox.IsA('newpass'),
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
self.stubs.Set(self.compute.driver, 'instance_on_disk',
lambda x: False)
self.mox.ReplayAll()
self._rebuild(on_shared_storage=False)
def test_rebuild_on_host_instance_exists(self):
"""Rebuild if instance exists raises an exception."""
db.instance_update(self.context, self.inst_ref['uuid'],
{"task_state": task_states.SCHEDULING})
self.compute.run_instance(self.context, self.inst_ref, {}, {},
[], None, None, True, None, False)
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.assertRaises(exception.InstanceExists,
lambda: self._rebuild(on_shared_storage=True))
def test_driver_doesnt_support_recreate(self):
with utils.temporary_mutation(self.compute.driver.capabilities,
supports_recreate=False):
self.stubs.Set(self.compute.driver, 'instance_on_disk',
lambda x: True)
self.assertRaises(exception.InstanceRecreateNotSupported,
lambda: self._rebuild(on_shared_storage=True))
class ComputeInjectedFilesTestCase(BaseTestCase):
# Test that running an instance with injected_files decodes the files correctly
def setUp(self):
super(ComputeInjectedFilesTestCase, self).setUp()
self.instance = self._create_fake_instance_obj()
self.stubs.Set(self.compute.driver, 'spawn', self._spawn)
def _spawn(self, context, instance, image_meta, injected_files,
admin_password, nw_info, block_device_info, db_api=None):
self.assertEqual(self.expected, injected_files)
def _test(self, injected_files, decoded_files):
self.expected = decoded_files
self.compute.run_instance(self.context, self.instance, {}, {}, [],
injected_files, None, True, None, False)
def test_injected_none(self):
# test an input of None for injected_files
self._test(None, [])
def test_injected_empty(self):
# test an input of [] for injected_files
self._test([], [])
def test_injected_success(self):
# test with valid b64 encoded content.
injected_files = [
('/a/b/c', base64.b64encode('foobarbaz')),
('/d/e/f', base64.b64encode('seespotrun')),
]
decoded_files = [
('/a/b/c', 'foobarbaz'),
('/d/e/f', 'seespotrun'),
]
self._test(injected_files, decoded_files)
def test_injected_invalid(self):
# test with invalid b64 encoded content
injected_files = [
('/a/b/c', base64.b64encode('foobarbaz')),
('/d/e/f', 'seespotrun'),
]
self.assertRaises(exception.Base64Exception, self.compute.run_instance,
self.context, self.instance, {}, {}, [], injected_files, None,
True, None, False)
def test_reschedule(self):
# test that rescheduling is done with original encoded files
expected = [
('/a/b/c', base64.b64encode('foobarbaz')),
('/d/e/f', base64.b64encode('seespotrun')),
]
def _roe(context, instance, exc_info, requested_networks,
admin_password, injected_files, is_first_time, request_spec,
filter_properties, bdms=None, legacy_bdm_in_spec=False):
self.assertEqual(expected, injected_files)
return True
def spawn_explode(context, instance, image_meta, injected_files,
admin_password, nw_info, block_device_info):
# force reschedule logic to execute
raise test.TestingException(_("spawn error"))
self.stubs.Set(self.compute.driver, 'spawn', spawn_explode)
self.stubs.Set(self.compute, '_reschedule_or_error', _roe)
self.compute.run_instance(self.context, self.instance, {}, {}, [],
expected, None, True, None, False)
class CheckConfigDriveTestCase(test.TestCase):
# NOTE(sirp): `TestCase` is far too heavyweight for this test; it should
# probably derive from a `test.FastTestCase` that omits DB and env
# handling
def setUp(self):
super(CheckConfigDriveTestCase, self).setUp()
self.compute_api = compute.API()
def _assertCheck(self, expected, config_drive):
self.assertEqual(expected,
self.compute_api._check_config_drive(config_drive))
def _assertInvalid(self, config_drive):
self.assertRaises(exception.ConfigDriveInvalidValue,
self.compute_api._check_config_drive,
config_drive)
def test_config_drive_false_values(self):
self._assertCheck('', None)
self._assertCheck('', '')
self._assertCheck('', 'False')
self._assertCheck('', 'f')
self._assertCheck('', '0')
def test_config_drive_true_values(self):
self._assertCheck(True, 'True')
self._assertCheck(True, 't')
self._assertCheck(True, '1')
def test_config_drive_bogus_values_raise(self):
self._assertInvalid('asd')
self._assertInvalid(uuidutils.generate_uuid())
class CheckRequestedImageTestCase(test.TestCase):
def setUp(self):
super(CheckRequestedImageTestCase, self).setUp()
self.compute_api = compute.API()
self.context = context.RequestContext(
'fake_user_id', 'fake_project_id')
self.instance_type = flavors.get_default_flavor()
self.instance_type['memory_mb'] = 64
self.instance_type['root_gb'] = 1
def test_no_image_specified(self):
self.compute_api._check_requested_image(self.context, None, {},
self.instance_type)
def test_image_status_must_be_active(self):
image = dict(id='123', status='foo')
self.assertRaises(exception.ImageNotActive,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['status'] = 'active'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_image_min_ram_check(self):
image = dict(id='123', status='active', min_ram='65')
self.assertRaises(exception.FlavorMemoryTooSmall,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['min_ram'] = '64'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_image_min_disk_check(self):
image = dict(id='123', status='active', min_disk='2')
self.assertRaises(exception.FlavorDiskTooSmall,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['min_disk'] = '1'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_image_too_large(self):
image = dict(id='123', status='active', size='1073741825')
self.assertRaises(exception.FlavorDiskTooSmall,
self.compute_api._check_requested_image, self.context,
image['id'], image, self.instance_type)
image['size'] = '1073741824'
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_root_gb_zero_disables_size_check(self):
self.instance_type['root_gb'] = 0
image = dict(id='123', status='active', size='1073741825')
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
def test_root_gb_zero_disables_min_disk(self):
self.instance_type['root_gb'] = 0
image = dict(id='123', status='active', min_disk='2')
self.compute_api._check_requested_image(self.context, image['id'],
image, self.instance_type)
|
{
"content_hash": "fdb52b1ace837971b959fa6c86183e7b",
"timestamp": "",
"source": "github",
"line_count": 10802,
"max_line_length": 79,
"avg_line_length": 44.46000740603592,
"alnum_prop": 0.5684081647950993,
"repo_name": "nkrinner/nova",
"id": "7f47f7d7888c7645d3038426722a3e48d570c8ae",
"size": "481065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/compute/test_compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import asyncio
import logging
import os
import ssl
import threading
import traceback
from urllib.parse import urlparse
from typing import Any, Dict, List, Optional, Tuple
# TODO(bashi): Remove import check suppressions once aioquic dependency is resolved.
from aioquic.buffer import Buffer # type: ignore
from aioquic.asyncio import QuicConnectionProtocol, serve # type: ignore
from aioquic.asyncio.client import connect # type: ignore
from aioquic.h3.connection import H3_ALPN, FrameType, H3Connection, ProtocolError, Setting # type: ignore
from aioquic.h3.events import H3Event, HeadersReceived, WebTransportStreamDataReceived, DatagramReceived, DataReceived # type: ignore
from aioquic.quic.configuration import QuicConfiguration # type: ignore
from aioquic.quic.connection import logger as quic_connection_logger # type: ignore
from aioquic.quic.connection import stream_is_unidirectional
from aioquic.quic.events import QuicEvent, ProtocolNegotiated, ConnectionTerminated, StreamReset # type: ignore
from aioquic.tls import SessionTicket # type: ignore
from tools.wptserve.wptserve import stash # type: ignore
from .capsule import H3Capsule, H3CapsuleDecoder, CapsuleType
"""
A WebTransport over HTTP/3 server for testing.
The server interprets the underlying protocols (WebTransport, HTTP/3 and QUIC)
and passes events to a particular webtransport handler. From the standpoint of
test authors, a webtransport handler is a Python script which contains some
callback functions. See handler.py for available callbacks.
"""
SERVER_NAME = 'webtransport-h3-server'
_logger: logging.Logger = logging.getLogger(__name__)
_doc_root: str = ""
# Set aioquic's log level to WARNING to suppress some INFO logs which are
# recorded on every connection close.
quic_connection_logger.setLevel(logging.WARNING)
class H3ConnectionWithDatagram04(H3Connection):
"""
An H3Connection subclass that works with the latest
HTTP Datagram protocol.
"""
H3_DATAGRAM_04 = 0xffd277
# https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-h3-websockets-00#section-5
ENABLE_CONNECT_PROTOCOL = 0x08
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._supports_h3_datagram_04 = False
def _validate_settings(self, settings: Dict[int, int]) -> None:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
if H3_DATAGRAM_04 in settings and settings[H3_DATAGRAM_04] == 1:
settings[Setting.H3_DATAGRAM] = 1
self._supports_h3_datagram_04 = True
return super()._validate_settings(settings)
def _get_local_settings(self) -> Dict[int, int]:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
settings = super()._get_local_settings()
settings[H3_DATAGRAM_04] = 1
settings[H3ConnectionWithDatagram04.ENABLE_CONNECT_PROTOCOL] = 1
return settings
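# For reference, _get_local_settings() above extends aioquic's defaults so
# the advertised SETTINGS include (showing only the relevant subset):
#
#     {
#         H3ConnectionWithDatagram04.H3_DATAGRAM_04: 1,            # 0xffd277
#         H3ConnectionWithDatagram04.ENABLE_CONNECT_PROTOCOL: 1,   # 0x08
#     }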
@property
def supports_h3_datagram_04(self) -> bool:
"""
True if the client supports the latest HTTP Datagram protocol.
"""
return self._supports_h3_datagram_04
class WebTransportH3Protocol(QuicConnectionProtocol):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._handler: Optional[Any] = None
self._http: Optional[H3ConnectionWithDatagram04] = None
self._session_stream_id: Optional[int] = None
self._close_info: Optional[Tuple[int, bytes]] = None
self._capsule_decoder_for_session_stream: H3CapsuleDecoder =\
H3CapsuleDecoder()
self._allow_calling_session_closed = True
self._allow_datagrams = False
def quic_event_received(self, event: QuicEvent) -> None:
if isinstance(event, ProtocolNegotiated):
self._http = H3ConnectionWithDatagram04(
self._quic, enable_webtransport=True)
if not self._http.supports_h3_datagram_04:
self._allow_datagrams = True
if self._http is not None:
for http_event in self._http.handle_event(event):
self._h3_event_received(http_event)
if isinstance(event, ConnectionTerminated):
self._call_session_closed(close_info=None, abruptly=True)
if isinstance(event, StreamReset):
if self._handler:
self._handler.stream_reset(event.stream_id, event.error_code)
def _h3_event_received(self, event: H3Event) -> None:
if isinstance(event, HeadersReceived):
# Convert from List[Tuple[bytes, bytes]] to Dict[bytes, bytes].
# Only the last header will be kept when there are duplicate
# headers.
headers = {}
for header, value in event.headers:
headers[header] = value
method = headers.get(b":method")
protocol = headers.get(b":protocol")
if method == b"CONNECT" and protocol == b"webtransport":
self._session_stream_id = event.stream_id
self._handshake_webtransport(event, headers)
else:
self._send_error_response(event.stream_id, 400)
if isinstance(event, DataReceived) and\
self._session_stream_id == event.stream_id:
if self._http and not self._http.supports_h3_datagram_04 and\
len(event.data) > 0:
raise ProtocolError('Unexpected data on the session stream')
self._receive_data_on_session_stream(
event.data, event.stream_ended)
elif self._handler is not None:
if isinstance(event, WebTransportStreamDataReceived):
self._handler.stream_data_received(
stream_id=event.stream_id,
data=event.data,
stream_ended=event.stream_ended)
elif isinstance(event, DatagramReceived):
if self._allow_datagrams:
self._handler.datagram_received(data=event.data)
def _receive_data_on_session_stream(self, data: bytes, fin: bool) -> None:
self._capsule_decoder_for_session_stream.append(data)
if fin:
self._capsule_decoder_for_session_stream.final()
for capsule in self._capsule_decoder_for_session_stream:
if capsule.type in {CapsuleType.DATAGRAM,
CapsuleType.REGISTER_DATAGRAM_CONTEXT,
CapsuleType.CLOSE_DATAGRAM_CONTEXT}:
raise ProtocolError(
"Unimplemented capsule type: {}".format(capsule.type))
if capsule.type in {CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT,
CapsuleType.CLOSE_WEBTRANSPORT_SESSION}:
# We'll handle this case below.
pass
else:
# We should ignore unknown capsules.
continue
if self._close_info is not None:
raise ProtocolError((
"Receiving a capsule with type = {} after receiving " +
"CLOSE_WEBTRANSPORT_SESSION").format(capsule.type))
if capsule.type == CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT:
buffer = Buffer(data=capsule.data)
format_type = buffer.pull_uint_var()
# https://ietf-wg-webtrans.github.io/draft-ietf-webtrans-http3/draft-ietf-webtrans-http3.html#name-datagram-format-type
WEBTRANSPORT_FORMAT_TYPE = 0xff7c00
if format_type != WEBTRANSPORT_FORMAT_TYPE:
raise ProtocolError(
"Unexpected datagram format type: {}".format(
format_type))
self._allow_datagrams = True
elif capsule.type == CapsuleType.CLOSE_WEBTRANSPORT_SESSION:
buffer = Buffer(data=capsule.data)
code = buffer.pull_uint32()
# 4 bytes for the uint32.
reason = buffer.pull_bytes(len(capsule.data) - 4)
# TODO(yutakahirano): Make sure `reason` is a UTF-8 text.
self._close_info = (code, reason)
if fin:
self._call_session_closed(self._close_info, abruptly=False)
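# For reference, the CLOSE_WEBTRANSPORT_SESSION payload parsed above is a
# 32-bit error code followed by a UTF-8 reason string; an equivalent encoder
# (mirroring WebTransportSession.close() further down) would be:
#
#     buffer = Buffer(capacity=len(reason) + 4)
#     buffer.push_uint32(code)
#     buffer.push_bytes(reason)
#     payload = buffer.data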
def _send_error_response(self, stream_id: int, status_code: int) -> None:
assert self._http is not None
headers = [(b"server", SERVER_NAME.encode()),
(b":status", str(status_code).encode())]
self._http.send_headers(stream_id=stream_id,
headers=headers,
end_stream=True)
def _handshake_webtransport(self, event: HeadersReceived,
request_headers: Dict[bytes, bytes]) -> None:
assert self._http is not None
path = request_headers.get(b":path")
if path is None:
# `:path` must be provided.
self._send_error_response(event.stream_id, 400)
return
# Create a handler using `:path`.
try:
self._handler = self._create_event_handler(
session_id=event.stream_id,
path=path,
request_headers=event.headers)
except IOError:
self._send_error_response(event.stream_id, 404)
return
response_headers = [
(b"server", SERVER_NAME.encode()),
(b"sec-webtransport-http3-draft", b"draft02"),
]
self._handler.connect_received(response_headers=response_headers)
status_code = None
for name, value in response_headers:
if name == b":status":
status_code = value
break
if not status_code:
response_headers.append((b":status", b"200"))
self._http.send_headers(stream_id=event.stream_id,
headers=response_headers)
if status_code is None or status_code == b"200":
self._handler.session_established()
def _create_event_handler(self, session_id: int, path: bytes,
request_headers: List[Tuple[bytes, bytes]]) -> Any:
parsed = urlparse(path.decode())
file_path = os.path.join(_doc_root, parsed.path.lstrip("/"))
callbacks = {"__file__": file_path}
with open(file_path) as f:
exec(compile(f.read(), path, "exec"), callbacks)
session = WebTransportSession(self, session_id, request_headers)
return WebTransportEventHandler(session, callbacks)
def _call_session_closed(
self, close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
allow_calling_session_closed = self._allow_calling_session_closed
self._allow_calling_session_closed = False
if self._handler and allow_calling_session_closed:
self._handler.session_closed(close_info, abruptly)
class WebTransportSession:
"""
A WebTransport session.
"""
def __init__(self, protocol: WebTransportH3Protocol, session_id: int,
request_headers: List[Tuple[bytes, bytes]]) -> None:
self.session_id = session_id
self.request_headers = request_headers
self._protocol: WebTransportH3Protocol = protocol
self._http: H3Connection = protocol._http
# Use a shared default path for all handlers so that different
# WebTransport sessions can access the same store easily.
self._stash_path = '/webtransport/handlers'
self._stash: Optional[stash.Stash] = None
self._dict_for_handlers: Dict[str, Any] = {}
@property
def stash(self) -> stash.Stash:
"""A Stash object for storing cross-session state."""
if self._stash is None:
address, authkey = stash.load_env_config()
self._stash = stash.Stash(self._stash_path, address, authkey)
return self._stash
@property
def dict_for_handlers(self) -> Dict[str, Any]:
"""A dictionary that handlers can attach arbitrary data."""
return self._dict_for_handlers
def stream_is_unidirectional(self, stream_id: int) -> bool:
"""Return True if the stream is unidirectional."""
return stream_is_unidirectional(stream_id)
def close(self, close_info: Optional[Tuple[int, bytes]]) -> None:
"""
Close the session.
:param close_info: The close information to send.
"""
self._protocol._allow_calling_session_closed = False
assert self._protocol._session_stream_id is not None
session_stream_id = self._protocol._session_stream_id
if close_info is not None:
code = close_info[0]
reason = close_info[1]
buffer = Buffer(capacity=len(reason) + 4)
buffer.push_uint32(code)
buffer.push_bytes(reason)
capsule =\
H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, buffer.data)
self._http.send_data(session_stream_id, capsule.encode(), end_stream=False)
self._http.send_data(session_stream_id, b'', end_stream=True)
# TODO(yutakahirano): Reset all other streams.
# TODO(yutakahirano): Reject future stream open requests
# We need to wait for the stream data to arrive at the client, and then
# we need to close the connection. At this moment we're relying on the
# client's behavior.
# TODO(yutakahirano): Implement the above.
def create_unidirectional_stream(self) -> int:
"""
Create a unidirectional WebTransport stream and return the stream ID.
"""
return self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=True)
def create_bidirectional_stream(self) -> int:
"""
Create a bidirectional WebTransport stream and return the stream ID.
"""
stream_id = self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=False)
# TODO(bashi): Remove this workaround when aioquic supports receiving
# data on server-initiated bidirectional streams.
stream = self._http._get_or_create_stream(stream_id)
assert stream.frame_type is None
assert stream.session_id is None
stream.frame_type = FrameType.WEBTRANSPORT_STREAM
stream.session_id = self.session_id
return stream_id
def send_stream_data(self,
stream_id: int,
data: bytes,
end_stream: bool = False) -> None:
"""
Send data on the specific stream.
:param stream_id: The stream ID on which to send the data.
:param data: The data to send.
:param end_stream: If set to True, the stream will be closed.
"""
self._http._quic.send_stream_data(stream_id=stream_id,
data=data,
end_stream=end_stream)
def send_datagram(self, data: bytes) -> None:
"""
Send data using a datagram frame.
:param data: The data to send.
"""
if not self._protocol._allow_datagrams:
_logger.warning(
"Sending a datagram while that's not allowed - discarding it")
return
flow_id = self.session_id
if self._http.supports_h3_datagram_04:
# The REGISTER_DATAGRAM_NO_CONTEXT capsule was on the session
# stream, so we must have the ID of the stream.
assert self._protocol._session_stream_id is not None
# TODO(yutakahirano): Make sure this is the correct logic.
# Chrome always uses 0 for the initial stream and the initial flow
# ID, so we cannot check the correctness with it.
flow_id = self._protocol._session_stream_id // 4
self._http.send_datagram(flow_id=flow_id, data=data)
def stop_stream(self, stream_id: int, code: int) -> None:
"""
Send a STOP_SENDING frame to the given stream.
:param code: The reason for the error.
"""
self._http._quic.stop_stream(stream_id, code)
def reset_stream(self, stream_id: int, code: int) -> None:
"""
Send a RESET_STREAM frame to the given stream.
:param code: The reason for the error.
"""
self._http._quic.reset_stream(stream_id, code)
class WebTransportEventHandler:
def __init__(self, session: WebTransportSession,
callbacks: Dict[str, Any]) -> None:
self._session = session
self._callbacks = callbacks
def _run_callback(self, callback_name: str,
*args: Any, **kwargs: Any) -> None:
if callback_name not in self._callbacks:
return
try:
self._callbacks[callback_name](*args, **kwargs)
except Exception as e:
_logger.warning(str(e))
traceback.print_exc()
def connect_received(self, response_headers: List[Tuple[bytes,
bytes]]) -> None:
self._run_callback("connect_received", self._session.request_headers,
response_headers)
def session_established(self) -> None:
self._run_callback("session_established", self._session)
def stream_data_received(self, stream_id: int, data: bytes,
stream_ended: bool) -> None:
self._run_callback("stream_data_received", self._session, stream_id,
data, stream_ended)
def datagram_received(self, data: bytes) -> None:
self._run_callback("datagram_received", self._session, data)
def session_closed(
self,
close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
self._run_callback(
"session_closed", self._session, close_info, abruptly=abruptly)
def stream_reset(self, stream_id: int, error_code: int) -> None:
self._run_callback(
"stream_reset", self._session, stream_id, error_code)
class SessionTicketStore:
"""
Simple in-memory store for session tickets.
"""
def __init__(self) -> None:
self.tickets: Dict[bytes, SessionTicket] = {}
def add(self, ticket: SessionTicket) -> None:
self.tickets[ticket.ticket] = ticket
def pop(self, label: bytes) -> Optional[SessionTicket]:
return self.tickets.pop(label, None)
class WebTransportH3Server:
"""
A WebTransport over HTTP/3 server for testing.
:param host: Host from which to serve.
:param port: Port from which to serve.
:param doc_root: Document root for serving handlers.
:param cert_path: Path to certificate file to use.
:param key_path: Path to key file to use.
:param logger: a Logger object for this server.
"""
def __init__(self, host: str, port: int, doc_root: str, cert_path: str,
key_path: str, logger: Optional[logging.Logger]) -> None:
self.host = host
self.port = port
self.doc_root = doc_root
self.cert_path = cert_path
self.key_path = key_path
self.started = False
global _doc_root
_doc_root = self.doc_root
global _logger
if logger is not None:
_logger = logger
def start(self) -> None:
"""Start the server."""
self.server_thread = threading.Thread(
target=self._start_on_server_thread, daemon=True)
self.server_thread.start()
self.started = True
def _start_on_server_thread(self) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=False,
max_datagram_frame_size=65536,
)
_logger.info("Starting WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
configuration.load_cert_chain(self.cert_path, self.key_path)
ticket_store = SessionTicketStore()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(
serve(
self.host,
self.port,
configuration=configuration,
create_protocol=WebTransportH3Protocol,
session_ticket_fetcher=ticket_store.pop,
session_ticket_handler=ticket_store.add,
))
self.loop.run_forever()
def stop(self) -> None:
"""Stop the server."""
if self.started:
asyncio.run_coroutine_threadsafe(self._stop_on_server_thread(),
self.loop)
self.server_thread.join()
_logger.info("Stopped WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
self.started = False
async def _stop_on_server_thread(self) -> None:
self.loop.stop()
def server_is_running(host: str, port: int, timeout: float) -> bool:
"""
Check whether the WebTransport over HTTP/3 server is running at the given `host` and
`port`.
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(_connect_server_with_timeout(host, port, timeout))
async def _connect_server_with_timeout(host: str, port: int, timeout: float) -> bool:
try:
await asyncio.wait_for(_connect_to_server(host, port), timeout=timeout)
except asyncio.TimeoutError:
_logger.warning("Failed to connect WebTransport over HTTP/3 server")
return False
return True
async def _connect_to_server(host: str, port: int) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=True,
verify_mode=ssl.CERT_NONE,
)
async with connect(host, port, configuration=configuration) as protocol:
await protocol.ping()
|
{
"content_hash": "fe6ac18887c212e63d3b31f1f5d4c9bd",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 135,
"avg_line_length": 40.59668508287293,
"alnum_prop": 0.6056976955180547,
"repo_name": "scheib/chromium",
"id": "1ff5ca8b6ca56f869878571f9f76492200af2f1c",
"size": "22044",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/wpt_tools/wpt/tools/webtransport/h3/webtransport_h3_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import models
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
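# As a point of reference, such a composite foreign key is declared with
# models.ForeignObject by mapping several local columns onto the target's
# columns. The field names below are hypothetical, loosely mirroring the
# Membership -> Person relation exercised in these tests:
#
#     person = models.ForeignObject(
#         Person,
#         on_delete=models.CASCADE,
#         from_fields=('membership_country', 'person_id'),
#         to_fields=('person_country', 'id'),
#     )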
class MultiColumnFKTests(TestCase):
@classmethod
def setUpTestData(cls):
# Creating countries
cls.usa = Country.objects.create(name="United States of America")
cls.soviet_union = Country.objects.create(name="Soviet Union")
# Creating People
cls.bob = Person.objects.create(name='Bob', person_country=cls.usa)
cls.jim = Person.objects.create(name='Jim', person_country=cls.usa)
cls.george = Person.objects.create(name='George', person_country=cls.usa)
cls.jane = Person.objects.create(name='Jane', person_country=cls.soviet_union)
cls.mark = Person.objects.create(name='Mark', person_country=cls.soviet_union)
cls.sam = Person.objects.create(name='Sam', person_country=cls.soviet_union)
# Creating Groups
cls.kgb = Group.objects.create(name='KGB', group_country=cls.soviet_union)
cls.cia = Group.objects.create(name='CIA', group_country=cls.usa)
cls.republican = Group.objects.create(name='Republican', group_country=cls.usa)
cls.democrat = Group.objects.create(name='Democrat', group_country=cls.usa)
def test_get_succeeds_on_multicolumn_match(self):
# Membership objects have access to their related Person if both
# country_ids match between them
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
person = membership.person
self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))
def test_get_fails_on_multicolumn_mismatch(self):
# Membership objects raise a DoesNotExist error when there is no
# Person with the same id and country_id
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
with self.assertRaises(Person.DoesNotExist):
getattr(membership, 'person')
def test_reverse_query_returns_correct_result(self):
# Creating a valid membership because it has the same country as the person
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
# Creating an invalid membership because it has a different country than the person
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.bob.id,
group_id=self.republican.id)
with self.assertNumQueries(1):
membership = self.bob.membership_set.get()
self.assertEqual(membership.group_id, self.cia.id)
self.assertIs(membership.person, self.bob)
def test_query_filters_correctly(self):
# Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(membership_country_id=self.soviet_union.id,
person_id=self.george.id, group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__name__contains='o'), [
self.bob.id
],
attrgetter("person_id")
)
def test_reverse_query_filters_correctly(self):
timemark = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None)
timedelta = datetime.timedelta(days=1)
# Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id, date_joined=timemark - timedelta)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gte=timemark), [
'Jim'
],
attrgetter('name')
)
def test_forward_in_lookup_filters_correctly(self):
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=[self.george, self.jim]), [
self.jim.id,
],
attrgetter('person_id')
)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
self.jim.id,
],
attrgetter('person_id')
)
def test_double_nested_query(self):
m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
self.assertSequenceEqual(
Membership.objects.filter(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
)
),
[m1]
)
self.assertSequenceEqual(
Membership.objects.exclude(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
)
),
[m2]
)
def test_select_related_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(1):
people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
people = [
m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
membership_sets = [
list(p.membership_set.all())
for p in Person.objects.prefetch_related('membership_set').order_by('pk')]
with self.assertNumQueries(7):
normal_membership_sets = [
list(p.membership_set.all())
for p in Person.objects.order_by('pk')
]
self.assertEqual(membership_sets, normal_membership_sets)
def test_m2m_through_forward_returns_valid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
# Let's check to make sure that it worked. Bob and Jim should be members of the CIA.
self.assertQuerysetEqual(
self.cia.members.all(), [
'Bob',
'Jim'
], attrgetter("name")
)
def test_m2m_through_reverse_returns_valid_members(self):
# We start out by making sure that Bob is in no groups.
self.assertQuerysetEqual(
self.bob.groups.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.bob,
group=self.republican)
# Bob should be in the CIA and a Republican
self.assertQuerysetEqual(
self.bob.groups.all(), [
'CIA',
'Republican'
], attrgetter("name")
)
def test_m2m_through_forward_ignores_invalid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
# Something adds Jane to group CIA, but Jane is in the Soviet Union, which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# There should still be no members in CIA
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
def test_m2m_through_reverse_ignores_invalid_members(self):
# We start out by making sure that Jane has no groups.
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
# Something adds Jane to group CIA, but Jane is in the Soviet Union, which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# Jane should still not be in any groups
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
def test_m2m_through_on_self_works(self):
self.assertQuerysetEqual(
self.jane.friends.all(),
[]
)
Friendship.objects.create(
from_friend_country=self.jane.person_country, from_friend=self.jane,
to_friend_country=self.george.person_country, to_friend=self.george)
self.assertQuerysetEqual(
self.jane.friends.all(),
['George'], attrgetter("name")
)
def test_m2m_through_on_self_ignores_mismatch_columns(self):
self.assertQuerysetEqual(self.jane.friends.all(), [])
# Note that we use ids instead of instances. This is because assigning an
# instance to a ForeignObject property will set all related fields based on
# the given instance
Friendship.objects.create(
from_friend_id=self.jane.id, to_friend_id=self.george.id,
to_friend_country_id=self.jane.person_country_id,
from_friend_country_id=self.george.person_country_id)
self.assertQuerysetEqual(self.jane.friends.all(), [])
def test_prefetch_related_m2m_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
members_lists = [list(g.members.all())
for g in Group.objects.prefetch_related('members')]
normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
self.assertEqual(members_lists, normal_members_lists)
def test_prefetch_related_m2m_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
self.assertEqual(groups_lists, normal_groups_lists)
@translation.override('fi')
def test_translations(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
at1_fi.save()
at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
at2_en.save()
self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi)
with self.assertNumQueries(1):
fetched = Article.objects.select_related('active_translation').get(
active_translation__title='Otsikko')
self.assertEqual(fetched.active_translation.title, 'Otsikko')
a2 = Article.objects.create(pub_date=datetime.date.today())
at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
abstract='dipad')
at2_fi.save()
a3 = Article.objects.create(pub_date=datetime.date.today())
at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
abstract='lala')
at3_en.save()
# Test model initialization with active_translation field.
a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
a3.save()
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a3])
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None,
active_translation__pk__isnull=False)),
[a1])
with translation.override('en'):
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a2])
def test_foreign_key_raises_informative_does_not_exist(self):
referrer = ArticleTranslation()
with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
referrer.article
def test_foreign_key_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
ArticleTag.objects.create(article=a1, name="foo")
self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
msg = (
"Cannot resolve keyword 'tags' into field. Choices are: "
"active_translation, active_translation_q, articletranslation, "
"id, idea_things, newsarticle, pub_date, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(tags__name="foo")
def test_many_to_many_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
i1 = ArticleIdea.objects.create(name="idea1")
a1.ideas.add(i1)
self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
msg = (
"Cannot resolve keyword 'ideas' into field. Choices are: "
"active_translation, active_translation_q, articletranslation, "
"id, idea_things, newsarticle, pub_date, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(ideas__name="idea1")
@translation.override('fi')
def test_inheritance(self):
na = NewsArticle.objects.create(pub_date=datetime.date.today())
ArticleTranslation.objects.create(
article=na, lang="fi", title="foo", body="bar")
self.assertSequenceEqual(
NewsArticle.objects.select_related('active_translation'),
[na]
)
with self.assertNumQueries(1):
self.assertEqual(
NewsArticle.objects.select_related(
'active_translation')[0].active_translation.title,
"foo")
@skipUnlessDBFeature('has_bulk_insert')
def test_batch_create_foreign_object(self):
objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
Person.objects.bulk_create(objs, 10)
def test_isnull_lookup(self):
m1 = Membership.objects.create(membership_country=self.usa, person=self.bob, group_id=None)
m2 = Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
self.assertSequenceEqual(
Membership.objects.filter(group__isnull=True),
[m1],
)
self.assertSequenceEqual(
Membership.objects.filter(group__isnull=False),
[m2],
)
class TestModelCheckTests(SimpleTestCase):
@isolate_apps('foreign_object')
def test_check_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = models.ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
@isolate_apps('foreign_object')
def test_check_subset_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.CharField(max_length=255)
parent = models.ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'c'),
to_fields=('a', 'b', 'c'),
related_name='children',
)
self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
class TestExtraJoinFilterQ(TestCase):
@translation.override('fi')
def test_extra_join_filter_q(self):
a = Article.objects.create(pub_date=datetime.datetime.today())
ArticleTranslation.objects.create(article=a, lang='fi', title='title', body='body')
qs = Article.objects.all()
with self.assertNumQueries(2):
self.assertEqual(qs[0].active_translation_q.title, 'title')
qs = qs.select_related('active_translation_q')
with self.assertNumQueries(1):
self.assertEqual(qs[0].active_translation_q.title, 'title')
|
{
"content_hash": "de125467dd81c4d98100fcc6dc0129c1",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 100,
"avg_line_length": 42.55165289256198,
"alnum_prop": 0.6214615197863559,
"repo_name": "atul-bhouraskar/django",
"id": "72d50cad6b2321efeca4ba9d6155a374fdf5622c",
"size": "20595",
"binary": false,
"copies": "6",
"ref": "refs/heads/ticket_23424",
"path": "tests/foreign_object/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174031"
},
{
"name": "JavaScript",
"bytes": "249623"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11309010"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from se_benefactor.tests.tests_models import *
|
{
"content_hash": "d9150a1df69c904fbb9dd1cd9a86d2ad",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 46,
"avg_line_length": 47,
"alnum_prop": 0.8085106382978723,
"repo_name": "francisl/django-search-engine-tools",
"id": "66c8f28fffbb393840a86962b3e176b848175e4d",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search_engine_tools/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5662"
}
],
"symlink_target": ""
}
|
from pyvows import Vows, expect
# TEST DATA
STRINGS = {
'that_are_files': (
__file__,
unicode(__file__),
),
'that_are_not_files': (
__doc__,
)
}
# HELPERS
isafile = lambda topic: expect(topic).to_be_a_file()
isnotafile = lambda topic: expect(topic).not_to_be_a_file()
# NOW, MAKE YOUR VOWS.
@Vows.batch
class WhenMakingFileAssertions(Vows.Context):
# @TODO: Clean up this repetitive test code
#
# Preferably one of the following:
#
# - context inheritance
# http://pyvows.org/#-context-inheritance
#
# - generative testing
# http://pyvows.org/#-using-generative-testing
class OnFilesThatDoNotExist(Vows.Context):
def topic(self):
for item in STRINGS['that_are_not_files']:
yield item
class AssertingThatTheyDo(Vows.Context):
@Vows.capture_error
def topic(self, parent_topic):
return isafile(parent_topic)
def should_raise_an_error(self, topic):
expect(topic).to_be_an_error_like(AssertionError)
class AssertingThatTheyDoNot(Vows.Context):
@Vows.capture_error
def topic(self, parent_topic):
return isnotafile(parent_topic)
def should_raise_no_errors(self, topic):
expect(topic).Not.to_be_an_error()
class OnFilesThatDoExist(Vows.Context):
def topic(self):
for item in STRINGS['that_are_files']:
yield item
class AssertingTheyAreFiles(Vows.Context):
@Vows.capture_error
def topic(self, parent_topic):
return isafile(parent_topic)
def should_not_raise_errors(self, topic):
expect(topic).not_to_be_an_error()
class AssertingTheyAreNotFiles(Vows.Context):
@Vows.capture_error
def topic(self, parent_topic):
return isnotafile(parent_topic)
def should_raise_an_error(self, topic):
expect(topic).to_be_an_error()
class WhenWeInstantiateThemAsFileObjects(Vows.Context):
def topic(self, parent_topic):
f = open(parent_topic)
return f
class AssertingTheyAreFiles(Vows.Context):
@Vows.capture_error
def topic(self, parent_topic):
return isafile(parent_topic)
def should_not_raise_errors(self, topic):
expect(topic).not_to_be_an_error()
class AssertingTheyAreNotFiles(Vows.Context):
@Vows.capture_error
def topic(self, parent_topic):
return isnotafile(parent_topic)
def should_raise_an_error(self, topic):
expect(topic).to_be_an_error()
|
{
"content_hash": "cf072611ebff631b5ca9c2e4daf79113",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 65,
"avg_line_length": 30.22680412371134,
"alnum_prop": 0.5545702592087313,
"repo_name": "marcelometal/pyvows",
"id": "92499227637f89251cdf4f2cb23af9df6e208c5b",
"size": "3186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/assertions/types/file_vows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "412"
},
{
"name": "Python",
"bytes": "193088"
}
],
"symlink_target": ""
}
|
import discord_logging
log = discord_logging.get_logger()
import counters
from praw_wrapper import ReturnType
import utils
def send_queued_notifications(reddit, database):
count_pending_notifications = database.get_count_pending_notifications()
counters.queue.set(count_pending_notifications)
notifications_sent = 0
if count_pending_notifications > 0:
users_to_delete = set()
notifications = database.get_pending_notifications(utils.requests_available(count_pending_notifications))
for notification in notifications:
notifications_sent += 1
counters.notifications.inc()
counters.queue.dec()
if notification.subscription is None:
log.warning(
f"Notification for u/{notification.submission.author.name} in r/"
f"{notification.submission.subreddit.name} missing subscription, skipping: "
f"{notification.submission.submission_id}")
database.delete_notification(notification)
continue
log.info(
f"{notifications_sent}/{len(notifications)}/{count_pending_notifications}: Notifying u/"
f"{notification.subscription.subscriber.name} for u/{notification.submission.author.name} in r/"
f"{notification.subscription.subreddit.name} : {notification.submission.submission_id}")
submissions = database.get_recent_submissions_for_author(
notification.submission.author,
notification.submission.subreddit,
notification.submission.id,
3)
body_bldr = utils.get_footer(notification.render_notification(submissions))
subject_bldr = notification.render_subject()
result = reddit.send_message(notification.subscription.subscriber.name, ''.join(subject_bldr), ''.join(body_bldr))
notification.submission.messages_sent += 1
if result in [ReturnType.INVALID_USER, ReturnType.USER_DOESNT_EXIST]:
log.info(f"User doesn't exist: u/{notification.subscription.subscriber.name}")
users_to_delete.add(notification.subscription.subscriber)
if result in [ReturnType.NOT_WHITELISTED_BY_USER_MESSAGE]:
log.info(f"User blocked notification message: u/{notification.subscription.subscriber.name}")
if not notification.subscription.recurring:
log.debug(f"{notification.subscription.id} deleted")
database.delete_subscription(notification.subscription)
database.delete_notification(notification)
if notifications_sent % 50 == 0:
database.commit()
database.commit()
if len(users_to_delete):
for user in users_to_delete:
database.purge_user(user)
database.commit()
else:
log.debug("No notifications to send")
return notifications_sent
|
{
"content_hash": "c4061de36b3c529334b4e57d4e740d72",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 117,
"avg_line_length": 37.10144927536232,
"alnum_prop": 0.759375,
"repo_name": "Watchful1/RedditSubsBot",
"id": "e1d6daacc4f43844962ba83dafbe522be53191ce",
"size": "2560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/notifications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75583"
}
],
"symlink_target": ""
}
|
import numpy
import math
import pygame
import time
from PIL import Image
from pygame.color import THECOLORS
from copy import copy
from nupic.research.spatial_pooler import SpatialPooler
DEBUG = 0
class SPViewer(object):
'''
This class provides a PyGame window that visualizes the behavior of the
Numenta Spatial Pooler algorithm for very small image inputs.
It is meant as an educational tool. As you change the parameters to the
SP you will get a better understanding of the errors that learning
algorithms such as the SP can make, and the strategies the SP uses to
overcome those errors.
'''
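# Illustrative usage sketch (the SpatialPooler arguments and image path are
# assumptions; the SPViewer arguments follow __init__ below):
#
#     sp = SpatialPooler(inputDimensions=(32 * 32,),
#                        columnDimensions=(64,))
#     viewer = SPViewer(sp,
#                       imagePath='input.png',
#                       patchSide=32,
#                       epochCount=40,
#                       replayDelay=0.1)
#     viewer.run()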
def __init__(self,
sp,
screenWidth = 512,
screenHeight = 512,
imagePath = None,
patchSide = 32,
patchOverlapPercent = 0,
epochCount = 40,
replayDelay = 0,
layout = None):
# Store properties
self.sp = sp
self.screenWidth = screenWidth
self.screenHeight = screenHeight
self.imagePath = imagePath
self.patchSide = patchSide
self.patchOverlapPercent = patchOverlapPercent
self.epochCount = epochCount
self.replayDelay = replayDelay
self.layout = layout
self.featuresCount = self.sp._columnDimensions
# Start up our display
pygame.init()
# Set up our screen
size = self.screenWidth, self.screenHeight
self.screen = pygame.display.set_mode(size)
# Start with a blank white canvas
self.screen.fill(THECOLORS['white'])
# Add labels
self._drawLabels(self.layout)
def run(self):
# Display the input image we'll be learning on
inputImage = pygame.image.load(self.imagePath).convert()
# Far left and centered vertically
iiX = 0
iiY = (.5 * self.screenHeight) - (.5 * inputImage.get_height())
self.screen.blit(inputImage, (iiX, iiY))
# Display an outer bounding box
self._drawBoundingBox(inputImage, iiX, iiY)
# Get some image patches on which to train
imagePatches = self._getPatchesFromImage(self.imagePath,
self.patchSide,
self.patchOverlapPercent)
# Convert those to bit vectors for input into CLA
vectorPatches = [self._convertToVector(patch[0]) for patch in imagePatches]
inputVectorLength = self.patchSide**2
# An array to store the Activity state of the neurons
activeArray = numpy.zeros(self.featuresCount)
# Draw permanences before any input or learning
self._drawPermanences()
# Feed in data and visualize the evolution of the permanences
for i in range(1, self.epochCount + 1):
print "Epoch:", i
columnEpochHistory = []
for j, patch in enumerate(vectorPatches):
# Display our sliding window
imagePatch, patchDimensions = imagePatches[j]
self._drawViewBox(patchDimensions, iiX, iiY)
# Show the patch CLA sees in a given iteration
# Position it near the middle and centered vertically
patchX = 160
patchY = (.5 * self.screenHeight) - (.5 * imagePatch.size[1])
self._drawPatch(imagePatch, patchX, patchY)
# Redraw base input image
self.screen.blit(inputImage, (iiX, iiY))
# Update the network
self.sp.compute(patch, True, activeArray)
# Draw column activations
self._drawColumnActivity(activeArray)
# Store those activations to later generate feature maps
columnEpochHistory.append(copy(activeArray))
# Slow things down for viewing
time.sleep(self.replayDelay)
# Display our perms after each epoch
self._drawPermanences()
# Draw feature maps
self._drawFeatureMaps(columnEpochHistory)
def _convertPILImageToPygameSurface(self, image):
'''
Returns a Pygame Surface instance built using data from a PIL Image
'''
mode = image.mode
size = image.size
data = image.tostring()
surf = pygame.image.frombuffer(data, size, mode)
return surf
def _convertToImage(self, listData, mode = '1'):
'''
Takes in a list and returns a new square image
'''
# Assume we're getting a square image patch
side = int(len(listData) ** 0.5)
# Create the new image of the right size
im = Image.new(mode, (side, side))
# Put the data into that patch
im.putdata(listData)
return im
def _convertToVector(self, image):
'''
Returns a bit vector representation (list of ints) of a PIL image.
'''
# Convert the image to black and white
image = image.convert('1',dither=Image.NONE)
# Pull out the data, turn that into a list, then a numpy array,
# then convert from 0 255 space to binary with a threshold.
    # Finally cast the values into a type the C++ backend expects (uint32)
vector = (numpy.array(list(image.getdata())) < 100).astype('uint32')
return vector
def _coordsToRect(self, coords):
'''
Returns a pygame Rect
'''
left = coords[0]
top = coords[1]
width = coords[2] - left
height = coords[3] - top
return pygame.Rect(left, top, width, height)
def _drawBoundingBox(self, image, x, y):
'''
Draws a Pygame.rect to screen that is a 1 pixel black box around the
given dimensions.
'''
color = THECOLORS['black']
boxDimensions = (x - 1,
y - 1,
x + image.get_width() + 2,
y + image.get_height() + 2)
rect = self._coordsToRect(boxDimensions)
width = 1
pygame.draw.rect(self.screen, color, rect, width)
def _drawLabels(self, layout):
'''
    Draws the section labels to the screen
TODO: Make this use a proper layout
'''
# Display some text
font = pygame.font.Font(None, 18)
text = font.render("Input Image", 1, (10, 10, 10))
self.screen.blit(text, (30, 10))
text = font.render("SP View", 1, (10, 10, 10))
self.screen.blit(text, (150, 10))
text = font.render("Activity", 1, (10, 10, 10))
self.screen.blit(text, (210, 10))
text = font.render("Perms", 1, (10, 10, 10))
self.screen.blit(text, (270, 10))
text = font.render("Connected", 1, (10, 10, 10))
self.screen.blit(text, (320, 10))
text = font.render("Feature Maps", 1, (10, 10, 10))
self.screen.blit(text, (400, 10))
def _drawPatch(self, im, x, y):
'''
Draws a patch to screen and updates the display
    im - a PIL image object
x, y - coords of where to draw the patch on screen
TODO: Show the B+W converted version which is what SP actually gets
'''
mode = im.mode
size = im.size
data = im.tostring()
im = pygame.image.frombuffer(data, size, mode)
# Draw in the background
self.screen.blit(im, (x, y))
# Display an outer bounding box
self._drawBoundingBox(im, x, y)
# Update the screen
pygame.display.flip()
def _drawPermanences(self):
for i in range(self.featuresCount):
perms = self.sp._permanences.getRow(i)
# Convert perms to RGB (effective grayscale) values
allPerms = [(v, v, v) for v in ((1 - perms) * 255).astype('int')]
connectedPerms = perms >= self.sp._synPermConnected
connectedPerms = (numpy.invert(connectedPerms) * 255).astype('int')
connectedPerms = [(v, v, v) for v in connectedPerms]
allPermsReconstruction = self._convertToImage(allPerms, 'RGB')
connectedReconstruction = self._convertToImage(connectedPerms, 'RGB')
size = allPermsReconstruction.size
# Convert that to a format that Pygame can use
pRSurface = self._convertPILImageToPygameSurface(allPermsReconstruction)
cSSurface = self._convertPILImageToPygameSurface(connectedReconstruction)
# Define where we'll draw that on the screen
xOffset = 272
yOffSet = (.5 * self.screenHeight) - (.5 * (self.featuresCount * size[1]))
# Line
x = xOffset
x2 = x + 64
y = yOffSet + i * self.patchSide
# Square
#x = (i % 4 * patchSide) + xOffset
#y = math.floor( i / 4 ) * patchSide
# Draw in the background
self.screen.blit(pRSurface, (x, y))
self.screen.blit(cSSurface, (x2, y))
def _drawColumnActivity(self, columnActivity):
# How large a square we want to represent a column
columnVizSize = 16
totalHeight = columnVizSize * len(columnActivity)
vertOffset = (self.screenHeight * .5) - (.5 * totalHeight)
for i, value in enumerate(columnActivity):
color = THECOLORS['black']
x1 = 224
y1 = vertOffset + (i * columnVizSize)
x2 = x1 + columnVizSize
y2 = y1 + columnVizSize
dimensions = (x1, y1, x2, y2)
rect = self._coordsToRect(dimensions)
if value:
width = 0
else:
width = 1
# Clear
pygame.draw.rect(self.screen, THECOLORS['white'], rect, 0)
# Redraw
pygame.draw.rect(self.screen, color, rect, width)
def _drawFeatureMaps(self, columnEpochHistory):
'''
Draws a feature map per column for the previous epoch
'''
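    # Each column's activity over the epoch's n patches is rendered as a
    # sqrt(n) x sqrt(n) image, then upscaled to roughly 32 x 32 pixels for display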
mapSide = len(columnEpochHistory) ** .5
scaleFactor = 32 / mapSide
mapSide = int(mapSide * scaleFactor)
# Create maps
featureMaps = []
columnsHistory = zip(*columnEpochHistory)
for columnHistory in columnsHistory:
cH = numpy.array(columnHistory)
cH = [(v, v, v) for v in ((1-cH) * 255).astype('int')]
mapImage = self._convertToImage(cH, 'RGB')
largeMapImage = mapImage.resize((mapSide, mapSide))
featureMaps.append(largeMapImage)
# Draw
for i, fMap in enumerate(featureMaps):
# Define where we'll draw that on the screen
xOffset = 400
yOffSet = (.5 * self.screenHeight) - (.5 * (len(featureMaps) * mapSide))
# Line
x = xOffset
y = yOffSet + i * mapSide
# Square
#x = (i % 4 * patchSide) + xOffset
#y = math.floor( i / 4 ) * patchSide
# Draw in the background
self.screen.blit(self._convertPILImageToPygameSurface(fMap), (x, y))
# Display an outer bounding box
color = THECOLORS['black']
dimensions = (x-1, y-1, x+mapSide+2, y+mapSide+2)
rect = self._coordsToRect(dimensions)
width = 1
pygame.draw.rect(self.screen, color, rect, width)
def _drawViewBox(self, patchDimensions, baseX, baseY):
'''
Draws a rect to the screen in the same location as patch
'''
color = THECOLORS['black']
dimensions = copy(patchDimensions)
dimensions[1] += baseY
dimensions[3] += baseY
rect = self._coordsToRect(dimensions)
width = 1
pygame.draw.rect(self.screen, color, rect, width)
def _getPatchesFromImage(self,
imageName,
patchSide = 32 ,
overlap = 0.0):
'''
Returns a list of lists representing bit vector patches of imageName
'''
# Prevent infinite loop
assert overlap < 1
# Open the training image
inputImage = Image.open(imageName)
if DEBUG == 1:
inputImage.show()
# Get its dimensions
_, _, imageWidth, imageHeight = inputImage.getbbox()
if DEBUG == 1:
print imageWidth, imageHeight
# Define the size of our patch
x1 = 0
y1 = 0
x2 = patchSide
y2 = patchSide
# Divide our image into patches
patches = []
counter = 0
# Loop over each row of imageHeight patchSide
while y2 <= imageHeight:
x1 = 0
x2 = patchSide
# Loop over each column of imageWidth patchSide
while x2 <= imageWidth:
# Get our patch and then update the coords for the next loop
target = [x1, y1, x2, y2]
if DEBUG == 1:
print target
patch = inputImage.crop(target)
patches.append([patch, target])
# Increment our counter
counter += 1
if DEBUG == 1:
print 'This is input pattern %d' % counter
print patch
patch.show()
# Move the patch over by a percent to allow for overlap of patches
move = 1 - overlap
move = int(math.floor(patchSide * move))
x1 += move
x2 += move
# Move the patch down
y1 += move
y2 += move
return patches
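# Hedged usage sketch (added; not part of the original module). The parameter
# values are assumptions sized for the viewer's default 32x32 patches, and
# 'input.png' is a hypothetical image path.
#   sp = SpatialPooler(inputDimensions=(1024,),
#                      columnDimensions=(16,),
#                      globalInhibition=True,
#                      numActiveColumnsPerInhArea=1)
#   viewer = SPViewer(sp, imagePath='input.png', patchSide=32,
#                     epochCount=5, replayDelay=0.1)
#   viewer.run()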
|
{
"content_hash": "34f20a27c201e81647430da60f017296",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 80,
"avg_line_length": 29.14351851851852,
"alnum_prop": 0.6062748212867355,
"repo_name": "iandanforth/spviewer",
"id": "feefb06d22d0a0f3f41fba1adc3ef731e3d75712",
"size": "12590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sp_viewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14152"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("desecapi", "0025_alter_token_max_age_alter_token_max_unused_period"),
]
operations = [
migrations.RemoveField(
model_name="domain",
name="replicated",
),
migrations.RemoveField(
model_name="domain",
name="replication_duration",
),
]
|
{
"content_hash": "9986653246751b5423339df9f9b8f20a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 22.736842105263158,
"alnum_prop": 0.5671296296296297,
"repo_name": "desec-io/desec-stack",
"id": "0a1cc5c7f86fc73824f30fc01245fec368032db2",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "api/desecapi/migrations/0026_remove_domain_replicated_and_more.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5786"
},
{
"name": "HTML",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "22126"
},
{
"name": "Python",
"bytes": "716037"
},
{
"name": "Shell",
"bytes": "10425"
},
{
"name": "Vue",
"bytes": "234220"
}
],
"symlink_target": ""
}
|
"""
Results handler for a multiprocessing setup of unittest.py
"""
import enum
import warnings
import operator
import queue
import threading
import time
import unittest
import unittest.case
import unittest.result
from nitpycker.excinfo import FrozenExcInfo
__author__ = "Benjamin Schubert, ben.c.schubert@gmail.com"
class TestState(enum.Enum):
"""
Represents all possible results for a test
"""
success = "success"
failure = "failures"
error = "errors"
skipped = "skipped"
expected_failure = "expected failures"
unexpected_success = "unexpected successes"
serialization_failure = "Serialization failure"
class SerializationWarning(UserWarning):
"""
    Warning to be raised when a solvable problem appeared while serializing an object
"""
class InterProcessResult(unittest.result.TestResult):
"""
A TestResult implementation to put results in a queue, for another thread to consume
"""
def __init__(self, result_queue: queue.Queue):
super().__init__()
self.result_queue = result_queue
self.start_time = self.stop_time = None
def startTest(self, test: unittest.case.TestCase) -> None:
"""
Saves the time before starting the test
:param test: the test that is going to be run
"""
self.start_time = time.time()
def add_result(self, _type, test, exc_info=None):
"""
Adds the given result to the list
:param _type: type of the state of the test (TestState.failure, TestState.error, ...)
:param test: the test
:param exc_info: additional execution information
"""
if exc_info is not None:
exc_info = FrozenExcInfo(exc_info)
test.time_taken = time.time() - self.start_time
test._outcome = None
self.result_queue.put((_type, test, exc_info))
def addSuccess(self, test: unittest.case.TestCase) -> None:
"""
        Transforms the test into a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
"""
# noinspection PyTypeChecker
self.add_result(TestState.success, test)
def addFailure(self, test: unittest.case.TestCase, exc_info: tuple) -> None:
"""
        Transforms the test into a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
:param exc_info: tuple of the form (Exception class, Exception instance, traceback)
"""
# noinspection PyTypeChecker
self.add_result(TestState.failure, test, exc_info)
def addError(self, test: unittest.case.TestCase, exc_info: tuple) -> None:
"""
        Transforms the test into a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
:param exc_info: tuple of the form (Exception class, Exception instance, traceback)
"""
# noinspection PyTypeChecker
self.add_result(TestState.error, test, exc_info)
def addExpectedFailure(self, test: unittest.case.TestCase, err: tuple) -> None:
"""
        Transforms the test into a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
:param err: tuple of the form (Exception class, Exception instance, traceback)
"""
# noinspection PyTypeChecker
self.add_result(TestState.expected_failure, test, err)
def addUnexpectedSuccess(self, test: unittest.case.TestCase) -> None:
"""
        Transforms the test into a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
"""
# noinspection PyTypeChecker
self.add_result(TestState.unexpected_success, test)
def addSkip(self, test: unittest.case.TestCase, reason: str):
"""
        Transforms the test into a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
:param reason: the reason why the test was skipped
"""
test.time_taken = time.time() - self.start_time
test._outcome = None
self.result_queue.put((TestState.skipped, test, reason))
class ResultCollector(threading.Thread, unittest.result.TestResult):
"""
    Results handler. Given a report queue, rebuilds a complete report from it, equivalent to what a run
    of unittest.TestResult would produce
:param stream: stream on which to write information
:param descriptions: whether to display tests descriptions or not
:param verbosity: the verbosity used for the test result reporters
    :param result_queue: queue from which to get the test results
:param test_results: list of testResults instances to use
:param tests: list of tests that are currently run
"""
def __init__(self, stream=None, descriptions=None, verbosity=None, *, result_queue: queue.Queue, test_results,
tests):
threading.Thread.__init__(self)
unittest.result.TestResult.__init__(self, stream, descriptions, verbosity)
self.test_results = test_results
for testResult in self.test_results:
if hasattr(testResult, "separator1"):
self.separator1 = testResult.separator1
break
for testResult in self.test_results:
if hasattr(testResult, "separator2"):
self.separator2 = testResult.separator2
break
self.result_queue = result_queue
self.cleanup = False
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.stream = stream
self.descriptions = descriptions
self.tests = tests
def end_collection(self) -> None:
""" Tells the thread that is it time to end """
self.cleanup = True
def _call_test_results(self, method_name, *args, **kwargs):
"""
        calls the given method on every test result instance
:param method_name: name of the method to call
:param args: arguments to pass to the method
:param kwargs: keyword arguments to pass to the method
"""
method = operator.methodcaller(method_name, *args, **kwargs)
for testResult in self.test_results:
method(testResult)
# noinspection PyPep8Naming
def getDescription(self, test):
"""
Get the description of the test
:param test: test from which to get the description
:return: description of the test
"""
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def test_info(self, test):
"""
writes test description on the stream used for reporting
:param test: test for which to display information
"""
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addError(self, test, err):
"""
registers a test as error
:param test: test to register
:param err: error the test gave
"""
super().addError(test, err)
self.test_info(test)
self._call_test_results('addError', test, err)
def addExpectedFailure(self, test, err):
"""
        registers a test as an expected failure
:param test: test to register
:param err: error the test gave
"""
super().addExpectedFailure(test, err)
self.test_info(test)
self._call_test_results('addExpectedFailure', test, err)
def addFailure(self, test, err):
"""
registers a test as failure
:param test: test to register
:param err: error the test gave
"""
super().addFailure(test, err)
self.test_info(test)
self._call_test_results('addFailure', test, err)
def addSkip(self, test, reason):
"""
registers a test as skipped
:param test: test to register
:param reason: reason why the test was skipped
"""
super().addSkip(test, reason)
self.test_info(test)
self._call_test_results('addSkip', test, reason)
def addSuccess(self, test):
"""
registers a test as successful
:param test: test to register
"""
super().addSuccess(test)
self.test_info(test)
self._call_test_results('addSuccess', test)
def addUnexpectedSuccess(self, test):
"""
registers a test as an unexpected success
:param test: test to register
"""
super().addUnexpectedSuccess(test)
self.test_info(test)
self._call_test_results('addUnexpectedSuccess', test)
def printErrors(self):
"""
print test report
"""
self._call_test_results('printErrors')
def run(self) -> None:
"""
processes entries in the queue until told to stop
"""
while not self.cleanup:
try:
result, test, additional_info = self.result_queue.get(timeout=1)
except queue.Empty:
continue
self.result_queue.task_done()
if result == TestState.serialization_failure:
test = self.tests[test]
warnings.warn("Serialization error: {} on test {}".format(
additional_info, test), SerializationWarning)
test(self)
else:
self.testsRun += 1
if result == TestState.success:
self.addSuccess(test)
elif result == TestState.failure:
self.addFailure(test, additional_info)
elif result == TestState.error:
self.addError(test, additional_info)
elif result == TestState.skipped:
self.addSkip(test, additional_info)
elif result == TestState.expected_failure:
self.addExpectedFailure(test, additional_info)
elif result == TestState.unexpected_success:
self.addUnexpectedSuccess(test)
else:
raise Exception("This is not a valid test type :", result)
|
{
"content_hash": "4fd6133cbde5daab7f76a8ef24cfb265",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 114,
"avg_line_length": 33.25079365079365,
"alnum_prop": 0.6125644452931067,
"repo_name": "BenjaminSchubert/NitPycker",
"id": "e9f572de577953c025532dc91d637c8e0f553d04",
"size": "10474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nitpycker/result.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38792"
}
],
"symlink_target": ""
}
|
from unittest import mock
from oslo_config import cfg
from requests import exceptions
import yaml
from heat.common import exception
from heat.common import identifier
from heat.common import template_format
from heat.common import urlfetch
from heat.engine import api
from heat.engine import node_data
from heat.engine import resource
from heat.engine.resources.aws.cfn import stack as stack_res
from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import template
from heat.objects import resource_data as resource_data_object
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
class NestedStackTest(common.HeatTestCase):
test_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: https://server.test/the.template
Parameters:
KeyName: foo
'''
nested_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Foo:
Value: bar
'''
update_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Bar:
Value: foo
'''
def setUp(self):
super(NestedStackTest, self).setUp()
self.patchobject(urlfetch, 'get')
def validate_stack(self, template):
t = template_format.parse(template)
stack = self.parse_stack(t)
res = stack.validate()
self.assertIsNone(res)
return stack
def parse_stack(self, t, data=None):
ctx = utils.dummy_context('test_username', 'aaaa', 'password')
stack_name = 'test_stack'
tmpl = template.Template(t)
stack = parser.Stack(ctx, stack_name, tmpl, adopt_stack_data=data)
stack.store()
return stack
@mock.patch.object(parser.Stack, 'total_resources')
def test_nested_stack_three_deep(self, tr):
root_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth1.template'
'''
depth1_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth2.template'
'''
depth2_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth3.template'
Parameters:
KeyName: foo
'''
urlfetch.get.side_effect = [
depth1_template,
depth2_template,
self.nested_template]
tr.return_value = 2
self.validate_stack(root_template)
calls = [mock.call('https://server.test/depth1.template'),
mock.call('https://server.test/depth2.template'),
mock.call('https://server.test/depth3.template')]
urlfetch.get.assert_has_calls(calls)
@mock.patch.object(parser.Stack, 'total_resources')
def test_nested_stack_six_deep(self, tr):
tmpl = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth%i.template'
'''
root_template = tmpl % 1
depth1_template = tmpl % 2
depth2_template = tmpl % 3
depth3_template = tmpl % 4
depth4_template = tmpl % 5
depth5_template = tmpl % 6
depth5_template += '''
Parameters:
KeyName: foo
'''
urlfetch.get.side_effect = [
depth1_template,
depth2_template,
depth3_template,
depth4_template,
depth5_template,
self.nested_template]
tr.return_value = 5
t = template_format.parse(root_template)
stack = self.parse_stack(t)
stack['Nested'].root_stack_id = '1234'
res = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Recursion depth exceeds', str(res))
calls = [mock.call('https://server.test/depth1.template'),
mock.call('https://server.test/depth2.template'),
mock.call('https://server.test/depth3.template'),
mock.call('https://server.test/depth4.template'),
mock.call('https://server.test/depth5.template'),
mock.call('https://server.test/depth6.template')]
urlfetch.get.assert_has_calls(calls)
def test_nested_stack_four_wide(self):
root_template = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth1.template'
Parameters:
KeyName: foo
Nested2:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth2.template'
Parameters:
KeyName: foo
Nested3:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth3.template'
Parameters:
KeyName: foo
Nested4:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/depth4.template'
Parameters:
KeyName: foo
'''
urlfetch.get.return_value = self.nested_template
self.validate_stack(root_template)
calls = [mock.call('https://server.test/depth1.template'),
mock.call('https://server.test/depth2.template'),
mock.call('https://server.test/depth3.template'),
mock.call('https://server.test/depth4.template')]
urlfetch.get.assert_has_calls(calls, any_order=True)
@mock.patch.object(parser.Stack, 'total_resources')
def test_nested_stack_infinite_recursion(self, tr):
tmpl = '''
HeatTemplateFormatVersion: 2012-12-12
Resources:
Nested:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: 'https://server.test/the.template'
'''
urlfetch.get.return_value = tmpl
t = template_format.parse(tmpl)
stack = self.parse_stack(t)
stack['Nested'].root_stack_id = '1234'
tr.return_value = 2
res = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Recursion depth exceeds', str(res))
expected_count = cfg.CONF.get('max_nested_stack_depth') + 1
self.assertEqual(expected_count, urlfetch.get.call_count)
def test_child_params(self):
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
nested_stack.properties.data[nested_stack.PARAMETERS] = {'foo': 'bar'}
self.assertEqual({'foo': 'bar'}, nested_stack.child_params())
def test_child_template_when_file_is_fetched(self):
urlfetch.get.return_value = 'template_file'
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
with mock.patch('heat.common.template_format.parse') as mock_parse:
mock_parse.return_value = 'child_template'
self.assertEqual('child_template', nested_stack.child_template())
mock_parse.assert_called_once_with(
'template_file', 'https://server.test/the.template')
def test_child_template_when_fetching_file_fails(self):
urlfetch.get.side_effect = exceptions.RequestException()
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
self.assertRaises(ValueError, nested_stack.child_template)
def test_child_template_when_io_error(self):
msg = 'Failed to retrieve template'
urlfetch.get.side_effect = urlfetch.URLFetchError(msg)
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
self.assertRaises(ValueError, nested_stack.child_template)
def test_refid(self):
t = template_format.parse(self.test_template)
stack = self.parse_stack(t)
nested_stack = stack['the_nested']
self.assertEqual('the_nested', nested_stack.FnGetRefId())
def test_refid_convergence_cache_data(self):
t = template_format.parse(self.test_template)
tmpl = template.Template(t)
ctx = utils.dummy_context()
cache_data = {'the_nested': node_data.NodeData.from_dict({
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'the_nested_convg_mock'
})}
stack = parser.Stack(ctx, 'test_stack', tmpl, cache_data=cache_data)
nested_stack = stack.defn['the_nested']
self.assertEqual('the_nested_convg_mock', nested_stack.FnGetRefId())
def test_get_attribute(self):
tmpl = template_format.parse(self.test_template)
ctx = utils.dummy_context('test_username', 'aaaa', 'password')
stack = parser.Stack(ctx, 'test',
template.Template(tmpl))
stack.store()
stack_res = stack['the_nested']
stack_res.store()
nested_t = template_format.parse(self.nested_template)
nested_t['Parameters']['KeyName']['Default'] = 'Key'
nested_stack = parser.Stack(ctx, 'test_nested',
template.Template(nested_t))
nested_stack.store()
stack_res._rpc_client = mock.MagicMock()
stack_res._rpc_client.show_stack.return_value = [
api.format_stack(nested_stack)]
stack_res.nested_identifier = mock.Mock()
stack_res.nested_identifier.return_value = {'foo': 'bar'}
self.assertEqual('bar', stack_res.FnGetAtt('Outputs.Foo'))
class ResDataResource(generic_rsrc.GenericResource):
def handle_create(self):
self.data_set("test", 'A secret value', True)
class ResDataStackTest(common.HeatTestCase):
tmpl = '''
HeatTemplateFormatVersion: "2012-12-12"
Parameters:
KeyName:
Type: String
Resources:
res:
Type: "res.data.resource"
Outputs:
Foo:
Value: bar
'''
def setUp(self):
super(ResDataStackTest, self).setUp()
resource._register_class("res.data.resource", ResDataResource)
def create_stack(self, template):
t = template_format.parse(template)
stack = utils.parse_stack(t)
stack.create()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
return stack
def test_res_data_delete(self):
stack = self.create_stack(self.tmpl)
res = stack['res']
stack.delete()
self.assertEqual((stack.DELETE, stack.COMPLETE), stack.state)
self.assertRaises(
exception.NotFound,
resource_data_object.ResourceData.get_val, res, 'test')
class NestedStackCrudTest(common.HeatTestCase):
nested_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
KeyName:
Type: String
Outputs:
Foo:
Value: bar
'''
def setUp(self):
super(NestedStackCrudTest, self).setUp()
self.ctx = utils.dummy_context('test_username', 'aaaa', 'password')
empty_template = {"HeatTemplateFormatVersion": "2012-12-12"}
self.stack = parser.Stack(self.ctx, 'test',
template.Template(empty_template))
self.stack.store()
self.patchobject(urlfetch, 'get', return_value=self.nested_template)
self.nested_parsed = yaml.safe_load(self.nested_template)
self.nested_params = {"KeyName": "foo"}
self.defn = rsrc_defn.ResourceDefinition(
'test_t_res',
'AWS::CloudFormation::Stack',
{"TemplateURL": "https://server.test/the.template",
"Parameters": self.nested_params})
self.res = stack_res.NestedStack('test_t_res',
self.defn, self.stack)
self.assertIsNone(self.res.validate())
self.res.store()
self.patchobject(stack_object.Stack, 'get_status',
return_value=('CREATE', 'COMPLETE',
'Created', 'Sometime'))
def test_handle_create(self):
self.res.create_with_template = mock.Mock(return_value=None)
self.res.handle_create()
self.res.create_with_template.assert_called_once_with(
self.nested_parsed, self.nested_params, None, adopt_data=None)
def test_handle_adopt(self):
self.res.create_with_template = mock.Mock(return_value=None)
self.res.handle_adopt(resource_data={'resource_id': 'fred'})
self.res.create_with_template.assert_called_once_with(
self.nested_parsed, self.nested_params, None,
adopt_data={'resource_id': 'fred'})
def test_handle_update(self):
self.res.update_with_template = mock.Mock(return_value=None)
self.res.handle_update(self.defn, None, None)
self.res.update_with_template.assert_called_once_with(
self.nested_parsed, self.nested_params, None)
def test_handle_delete(self):
self.res.rpc_client = mock.MagicMock()
self.res.action = self.res.CREATE
self.res.nested_identifier = mock.MagicMock()
stack_identity = identifier.HeatIdentifier(
self.ctx.tenant_id,
self.res.physical_resource_name(),
self.res.resource_id)
self.res.nested_identifier.return_value = stack_identity
self.res.resource_id = stack_identity.stack_id
self.res.handle_delete()
self.res.rpc_client.return_value.delete_stack.assert_called_once_with(
self.ctx, stack_identity, cast=False)
|
{
"content_hash": "ade54a17bf6ce664580e71cff6f31f8d",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 78,
"avg_line_length": 34.069377990430624,
"alnum_prop": 0.6192683098097044,
"repo_name": "openstack/heat",
"id": "33a9ab9fd99f55120632eb40c99e615df60d16a8",
"size": "14817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_nested_stack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
}
|
import collections
import os
import re
import sys
import textwrap
import traceback
from datetime import datetime
from itertools import chain
from urlparse import urljoin
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Sum
from django.template import Context, Template
from django.utils import translation
import requests
import olympia.core.logger
from olympia import amo, core
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon, AddonCategory, AddonUser, Category
from olympia.amo.celery import task
from olympia.amo.decorators import write
from olympia.amo.helpers import absolutify
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import chunked, send_mail, sorted_groupby
from olympia.constants.categories import CATEGORIES
from olympia.devhub.tasks import run_validator
from olympia.files.models import FileUpload
from olympia.files.utils import parse_addon
from olympia.lib.crypto.packaged import sign_file
from olympia.users.models import UserProfile
from olympia.users.utils import get_task_user
from olympia.versions.models import License, Version
from olympia.zadmin.models import (
EmailPreviewTopic, ValidationJob, ValidationResult)
log = olympia.core.logger.getLogger('z.task')
@task(rate_limit='3/s')
def admin_email(all_recipients, subject, body, preview_only=False,
from_email=settings.DEFAULT_FROM_EMAIL,
preview_topic='admin_email', **kw):
log.info('[%s@%s] admin_email about %r'
% (len(all_recipients), admin_email.rate_limit, subject))
if preview_only:
send = EmailPreviewTopic(topic=preview_topic).send_mail
else:
send = send_mail
for recipient in all_recipients:
send(subject, body, recipient_list=[recipient], from_email=from_email)
def tally_job_results(job_id, **kw):
sql = """select sum(1),
sum(case when completed IS NOT NULL then 1 else 0 end)
from validation_result
where validation_job_id=%s"""
with connection.cursor() as cursor:
cursor.execute(sql, [job_id])
total, completed = cursor.fetchone()
if completed == total:
# The job has finished.
job = ValidationJob.objects.get(pk=job_id)
job.update(completed=datetime.now())
if job.finish_email:
send_mail(u'Behold! Validation results for %s %s->%s'
% (amo.APP_IDS[job.application].pretty,
job.curr_max_version.version,
job.target_version.version),
textwrap.dedent("""
Aww yeah
%s
""" % absolutify(reverse('zadmin.validation'))),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[job.finish_email])
@task(rate_limit='6/s')
@write
def bulk_validate_file(result_id, **kw):
res = ValidationResult.objects.get(pk=result_id)
task_error = None
validation = None
file_base = os.path.basename(res.file.file_path)
try:
log.info('[1@None] Validating file %s (%s) for result_id %s'
% (res.file, file_base, res.id))
target = res.validation_job.target_version
guid = amo.APP_IDS[target.application].guid
ver = {guid: [target.version]}
# Set min/max so the validator only tests for compatibility with
# the target version. Note that previously we explicitly checked
# for compatibility with older versions. See bug 675306 for
# the old behavior.
overrides = {'targetapp_minVersion': {guid: target.version},
'targetapp_maxVersion': {guid: target.version}}
validation = run_validator(res.file.file_path, for_appversions=ver,
test_all_tiers=True, overrides=overrides,
compat=True)
except:
task_error = sys.exc_info()
log.exception(
u'bulk_validate_file exception on file {} ({})'
.format(res.file, file_base))
res.completed = datetime.now()
if task_error:
res.task_error = ''.join(traceback.format_exception(*task_error))
else:
res.apply_validation(validation)
log.info('[1@None] File %s (%s) errors=%s'
% (res.file, file_base, res.errors))
res.save()
tally_job_results(res.validation_job.id)
@task
@write
def add_validation_jobs(pks, job_pk, **kw):
log.info('[%s@None] Adding validation jobs for addons starting at: %s '
' for job: %s'
% (len(pks), pks[0], job_pk))
job = ValidationJob.objects.get(pk=job_pk)
curr_ver = job.curr_max_version.version_int
target_ver = job.target_version.version_int
unreviewed_statuses = (amo.STATUS_AWAITING_REVIEW, amo.STATUS_BETA)
for addon in Addon.objects.filter(pk__in=pks):
ids = set()
base = addon.versions.filter(apps__application=job.application,
apps__max__version_int__gte=curr_ver,
apps__max__version_int__lt=target_ver,
channel=amo.RELEASE_CHANNEL_LISTED)
already_compat = addon.versions.filter(
channel=amo.RELEASE_CHANNEL_LISTED,
files__status=amo.STATUS_PUBLIC,
apps__max__version_int__gte=target_ver)
if already_compat.exists():
log.info('Addon %s already has a public version %r which is '
'compatible with target version of app %s %s (or newer)'
% (addon.pk, [v.pk for v in already_compat.all()],
job.application, job.target_version))
continue
try:
public = (base.filter(files__status=amo.STATUS_PUBLIC)
.latest('id'))
except ObjectDoesNotExist:
public = None
if public:
ids.update([f.id for f in public.files.all()])
ids.update(base.filter(files__status__in=unreviewed_statuses,
id__gt=public.id)
.values_list('files__id', flat=True))
else:
ids.update(base.filter(files__status__in=unreviewed_statuses)
.values_list('files__id', flat=True))
log.info('Adding %s files for validation for '
'addon: %s for job: %s' % (len(ids), addon.pk, job_pk))
for id in ids:
result = ValidationResult.objects.create(validation_job_id=job_pk,
file_id=id)
bulk_validate_file.delay(result.pk)
def get_context(addon, version, job, results, fileob=None):
result_links = (absolutify(reverse('devhub.bulk_compat_result',
args=[addon.slug, r.pk]))
for r in results)
addon_name = addon.name
if fileob and fileob.platform != amo.PLATFORM_ALL.id:
addon_name = u'%s (%s)' % (addon_name, fileob.get_platform_display())
return Context({
'ADDON_NAME': addon_name,
'ADDON_VERSION': version.version,
'APPLICATION': str(job.application),
'COMPAT_LINK': absolutify(reverse('devhub.versions.edit',
args=[addon.pk, version.pk])),
'RESULT_LINKS': ' '.join(result_links),
'VERSION': job.target_version.version})
@task
@write
def update_maxversions(version_pks, job_pk, data, **kw):
log.info('[%s@%s] Updating max version for job %s.'
% (len(version_pks), update_maxversions.rate_limit, job_pk))
job = ValidationJob.objects.get(pk=job_pk)
core.set_user(get_task_user())
dry_run = data['preview_only']
app_id = job.target_version.application
stats = collections.defaultdict(int)
stats['processed'] = 0
stats['is_dry_run'] = int(dry_run)
for version in Version.objects.filter(pk__in=version_pks):
stats['processed'] += 1
file_pks = version.files.values_list('pk', flat=True)
errors = (ValidationResult.objects.filter(validation_job=job,
file__pk__in=file_pks)
.values_list('errors', flat=True))
if any(errors):
stats['invalid'] += 1
log.info('Version %s for addon %s not updated, '
'one of the files did not pass validation'
% (version.pk, version.addon.pk))
continue
for app in version.apps.filter(
application=job.curr_max_version.application,
max__version_int__gte=job.curr_max_version.version_int,
max__version_int__lt=job.target_version.version_int):
stats['bumped'] += 1
log.info('Updating version %s%s for addon %s from version %s '
'to version %s'
% (version.pk,
' [DRY RUN]' if dry_run else '',
version.addon.pk,
job.curr_max_version.version,
job.target_version.version))
app.max = job.target_version
if not dry_run:
app.save()
ActivityLog.create(
amo.LOG.MAX_APPVERSION_UPDATED,
version.addon, version,
details={'version': version.version,
'target': job.target_version.version,
'application': app_id})
log.info('[%s@%s] bulk update stats for job %s: {%s}'
% (len(version_pks), update_maxversions.rate_limit, job_pk,
', '.join('%s: %s' % (k, stats[k])
for k in sorted(stats.keys()))))
def _completed_versions(job, prefix=''):
filter = dict(files__validation_results__validation_job=job,
files__validation_results__completed__isnull=False)
if not prefix:
return filter
res = {}
for k, v in filter.iteritems():
res['%s__%s' % (prefix, k)] = v
return res
def updated_versions(job):
return (
Version.objects
.filter(files__validation_results__validation_job=job,
files__validation_results__errors=0,
files__validation_results__completed__isnull=False,
apps__application=job.curr_max_version.application,
apps__max__version_int__gte=job.curr_max_version.version_int,
apps__max__version_int__lt=job.target_version.version_int)
.exclude(files__validation_results__errors__gt=0)
.values_list('pk', flat=True).distinct())
def completed_version_authors(job):
return (
Version.objects
.filter(**_completed_versions(job))
# Prevent sorting by version creation date and
# thereby breaking `.distinct()`.
.order_by('addon__authors__pk')
.values_list('addon__authors__pk', flat=True).distinct())
@task
def notify_compatibility(job, params):
dry_run = params['preview_only']
log.info('[@None] Starting validation email/update process for job %d.'
' dry_run=%s.' % (job.pk, dry_run))
log.info('[@None] Starting validation version bumps for job %d.' % job.pk)
version_list = updated_versions(job)
total = version_list.count()
for chunk in chunked(version_list, 100):
log.info('[%d@%d] Updating versions for job %d.' % (
len(chunk), total, job.pk))
update_maxversions.delay(chunk, job.pk, params)
log.info('[@None] Starting validation email run for job %d.' % job.pk)
updated_authors = completed_version_authors(job)
total = updated_authors.count()
for chunk in chunked(updated_authors, 100):
log.info('[%d@%d] Notifying authors for validation job %d'
% (len(chunk), total, job.pk))
# There are times when you want to punch django's ORM in
# the face. This may be one of those times.
users_addons = list(
UserProfile.objects.filter(pk__in=chunk)
.filter(**_completed_versions(job,
'addons__versions'))
.values_list('pk', 'addons__pk').distinct())
users = list(UserProfile.objects.filter(
pk__in=set(u for u, a in users_addons)))
# Annotate fails in tests when using cached results
addons = (Addon.objects.no_cache()
.filter(**{
'pk__in': set(a for u, a in users_addons),
'versions__files__'
'validation_results__validation_job': job
})
.annotate(errors=Sum(
'versions__files__validation_results__errors')))
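        # Map addon pk -> Addon, then group the (user_pk, addon_pk) pairs by
        # user so each author can be notified about all of their add-ons at once.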
addons = dict((a.id, a) for a in addons)
users_addons = dict((u, [addons[a] for u, a in row])
for u, row in sorted_groupby(users_addons,
lambda k: k[0]))
log.info('[%d@%d] Notifying %d authors about %d addons for '
'validation job %d'
% (len(chunk), total, len(users), len(addons.keys()), job.pk))
for u in users:
addons = users_addons[u.pk]
u.passing_addons = [a for a in addons if a.errors == 0]
u.failing_addons = [a for a in addons if a.errors > 0]
notify_compatibility_chunk.delay(users, job, params)
log.info('[@None] Completed validation email/update process '
'for job %d. dry_run=%s.' % (job.pk, dry_run))
@task
@write
def notify_compatibility_chunk(users, job, data, **kw):
log.info('[%s@%s] Sending notification mail for job %s.'
% (len(users), notify_compatibility.rate_limit, job.pk))
core.set_user(get_task_user())
dry_run = data['preview_only']
app_id = job.target_version.application
stats = collections.defaultdict(int)
stats['processed'] = 0
stats['is_dry_run'] = int(dry_run)
for user in users:
stats['processed'] += 1
try:
for addon in chain(user.passing_addons, user.failing_addons):
try:
results = job.result_set.filter(file__version__addon=addon)
addon.links = [
absolutify(reverse('devhub.bulk_compat_result',
args=[addon.slug, r.pk]))
for r in results]
version = (
addon.current_version or addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED))
addon.compat_link = absolutify(reverse(
'devhub.versions.edit', args=[addon.pk, version.pk]))
except:
task_error = sys.exc_info()
log.error(u'Bulk validation email error for user %s, '
u'addon %s: %s: %s'
% (user.email, addon.slug,
task_error[0], task_error[1]), exc_info=False)
context = Context({
'APPLICATION': unicode(amo.APP_IDS[job.application].pretty),
'VERSION': job.target_version.version,
'PASSING_ADDONS': user.passing_addons,
'FAILING_ADDONS': user.failing_addons,
})
log.info(u'Emailing %s%s for %d addons about '
'bulk validation job %s'
% (user.email,
' [PREVIEW]' if dry_run else '',
len(user.passing_addons) + len(user.failing_addons),
job.pk))
args = (Template(data['subject']).render(context),
Template(data['text']).render(context))
kwargs = dict(from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[user.email])
if dry_run:
job.preview_notify_mail(*args, **kwargs)
else:
stats['author_emailed'] += 1
send_mail(*args, **kwargs)
ActivityLog.create(
amo.LOG.BULK_VALIDATION_USER_EMAILED,
user,
details={'passing': [a.id for a in user.passing_addons],
'failing': [a.id for a in user.failing_addons],
'target': job.target_version.version,
'application': app_id})
except:
task_error = sys.exc_info()
log.error(u'Bulk validation email error for user %s: %s: %s'
% (user.email,
task_error[0], task_error[1]), exc_info=False)
log.info('[%s@%s] bulk email stats for job %s: {%s}'
% (len(users), notify_compatibility.rate_limit, job.pk,
', '.join('%s: %s' % (k, stats[k])
for k in sorted(stats.keys()))))
@task
def fetch_langpacks(path, **kw):
log.info('[@None] Fetching language pack updates {0}'.format(path))
# Treat `path` as relative even if it begins with a leading /
base_url = urljoin(settings.LANGPACK_DOWNLOAD_BASE,
'./' + path.strip('/') + '/')
# Find the checksum manifest, 2 directories up.
list_url = urljoin(base_url, settings.LANGPACK_MANIFEST_PATH)
list_base = urljoin(list_url, './')
log.info('[@None] Fetching language pack manifests from {0}'
.format(list_url))
if not list_url.startswith(settings.LANGPACK_DOWNLOAD_BASE):
log.error('[@None] Not fetching language packs from invalid URL: '
'{0}'.format(base_url))
raise ValueError('Invalid path')
try:
req = requests.get(list_url,
verify=settings.CA_CERT_BUNDLE_PATH)
except Exception, e:
log.error('[@None] Error fetching language pack list {0}: {1}'
.format(path, e))
return
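    # Each manifest line is assumed to be whitespace-separated with the XPI
    # file name in its last column; resolve that name against the manifest dir.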
xpi_list = [urljoin(list_base, line[-1])
for line in map(str.split, req.iter_lines())]
allowed_file = re.compile(r'^[A-Za-z-]+\.xpi$').match
for url in xpi_list:
# Filter out files not in the target langpack directory.
if not url.startswith(base_url):
continue
xpi = url[len(base_url):]
# Filter out entries other than direct child XPIs.
if not allowed_file(xpi):
continue
fetch_langpack.delay(url, xpi)
@task(rate_limit='6/s')
@write
def fetch_langpack(url, xpi, **kw):
try:
req = requests.get(url,
verify=settings.CA_CERT_BUNDLE_PATH)
if ('content-length' not in req.headers or
int(req.headers['content-length']) >
settings.LANGPACK_MAX_SIZE):
            log.error('[@None] Language pack "{0}" too large: {1} > {2}'
                      .format(xpi, req.headers['content-length'],
                              settings.LANGPACK_MAX_SIZE))
return
chunks = []
size = 0
for chunk in req.iter_content(settings.LANGPACK_MAX_SIZE):
size += len(chunk)
# `requests` doesn't respect the Content-Length header
# so we need to check twice.
if size > settings.LANGPACK_MAX_SIZE:
                raise Exception('Response too big')
chunks.append(chunk)
except Exception, e:
log.error('[@None] Error fetching "{0}" language pack: {1}'
.format(xpi, e))
return
upload = FileUpload()
upload.add_file(chunks, xpi, size)
lang = os.path.splitext(xpi)[0]
# Activate the correct locale for the language pack so it
# will be used as the add-on's default locale if available.
with translation.override(lang):
try:
data = parse_addon(upload, check=False)
allowed_guid = re.compile(r'^langpack-{0}@'
r'[a-z]+\.mozilla\.org$'.format(lang))
assert allowed_guid.match(data['guid']), 'Unexpected GUID'
except Exception, e:
log.error('[@None] Error parsing "{0}" language pack: {1}'
.format(xpi, e),
exc_info=sys.exc_info())
return
try:
addon = Addon.objects.get(guid=data['guid'])
except Addon.DoesNotExist:
addon = None
try:
# Parse again now that we have the add-on.
data = parse_addon(upload, addon)
except Exception, e:
log.error('[@None] Error parsing "{0}" language pack: {1}'
.format(xpi, e),
exc_info=sys.exc_info())
return
if not data['apps']:
# We don't have the app versions that the langpack specifies
# in our approved versions list. Don't create a version for it,
# so we can retry once they've been added.
log.error('[@None] Not creating langpack {guid} {version} because '
'it has no valid compatible apps.'.format(**data))
return
is_beta = amo.VERSION_BETA.search(data['version'])
owner = UserProfile.objects.get(email=settings.LANGPACK_OWNER_EMAIL)
if addon:
if addon.versions.filter(version=data['version']).exists():
log.info('[@None] Version {0} of "{1}" language pack exists'
.format(data['version'], xpi))
return
if not addon.addonuser_set.filter(user=owner).exists():
log.info('[@None] Skipping language pack "{0}": '
'not owned by {1}'.format(
xpi, settings.LANGPACK_OWNER_EMAIL))
return
version = Version.from_upload(upload, addon, [amo.PLATFORM_ALL.id],
amo.RELEASE_CHANNEL_LISTED,
is_beta=is_beta)
log.info('[@None] Updated language pack "{0}" to version {1}'
.format(xpi, data['version']))
else:
if is_beta:
log.error('[@None] Not creating beta version {0} for new '
'"{1}" language pack'.format(data['version'], xpi))
return
if (Addon.objects.filter(name__localized_string=data['name'])
.exists()):
data['old_name'] = data['name']
data['name'] = u'{0} ({1})'.format(
data['old_name'], data['apps'][0].appdata.pretty)
log.warning(u'[@None] Creating langpack {guid}: Add-on with '
u'name {old_name!r} already exists, trying '
u'{name!r}.'.format(**data))
addon = Addon.from_upload(
upload, [amo.PLATFORM_ALL.id], parsed_data=data)
AddonUser(addon=addon, user=owner).save()
version = addon.versions.get()
if addon.default_locale.lower() == lang.lower():
addon.target_locale = addon.default_locale
addon.save()
log.info('[@None] Created new "{0}" language pack, version {1}'
.format(xpi, data['version']))
# Set the category
for app in version.compatible_apps:
static_category = (
CATEGORIES.get(app.id, []).get(amo.ADDON_LPAPP, [])
.get('general'))
if static_category:
category, _ = Category.objects.get_or_create(
id=static_category.id, defaults=static_category.__dict__)
AddonCategory.objects.get_or_create(
addon=addon, category=category)
# Add a license if there isn't one already
if not version.license:
license = License.objects.builtins().get(builtin=1)
version.update(license=license)
file_ = version.files.get()
if not is_beta:
# Not `version.files.update`, because we need to trigger save
# hooks.
file_.update(status=amo.STATUS_PUBLIC)
sign_file(file_, settings.SIGNING_SERVER)
# Finally, set the addon summary if one wasn't provided in the xpi.
addon.status = amo.STATUS_PUBLIC
addon.summary = addon.summary if addon.summary else unicode(addon.name)
addon.save(update_fields=('status', 'summary'))
addon.update_status()
@task
def celery_error(**kw):
"""
This task raises an exception from celery to test error logging and
Sentry hookup.
"""
log.info('about to raise an exception from celery')
raise RuntimeError('this is an exception from celery')
|
{
"content_hash": "1b96a94054e58bc004f4f1d08f450da6",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 79,
"avg_line_length": 39.96031746031746,
"alnum_prop": 0.5485203574975174,
"repo_name": "harikishen/addons-server",
"id": "f9ab9821069cc8ab0af8f2345a206f34fd7e96bc",
"size": "25175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/zadmin/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "822508"
},
{
"name": "HTML",
"bytes": "698554"
},
{
"name": "JavaScript",
"bytes": "1087360"
},
{
"name": "Makefile",
"bytes": "811"
},
{
"name": "PLSQL",
"bytes": "990"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4560536"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "7564"
},
{
"name": "Smarty",
"bytes": "1859"
}
],
"symlink_target": ""
}
|
from nba import enums
from nba.utils import clean_locals
from nba.endpoints.baseendpoint import BaseEndpoint
class Homepage(BaseEndpoint):
def leaders(
self,
idx_data,
league_id=enums.LeagueID.Default,
stat_category=enums.StatCategory.Default,
season=enums.Season.Default,
season_type=enums.SeasonType.Default,
player_or_team=enums.PlayerOrTeam.Default,
game_scope=enums.GameScope.Default,
player_scope=enums.PlayerScope.Default,
):
"""
Get top 5 players/teams by a particular stat.
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param league_id: league to filter for.
:type league_id: nba.enums.LeagueID
:param stat_category: Stat to sort leaders table by.
:type stat_category: nba.enums.StatCategory
:param season: Season for which to get stat leaders.
:type season: nba.enums.Season
:param season_type: Regular Season or Playoffs.
:type season_type: nba.enums.SeasonType
:param player_or_team: whether to get individual players or by team.
:type player_or_team: nba.enums.PlayerOrTeam
:param game_scope: what games to include in the data.
:type game_scope: nba.enums.GameScope
:param player_scope: filter by rookies only or all players
:type player_scope: nba.enums.PlayerScope
:returns: data for specified filters, as defined below by idx_data.
:rtype: DataFrame
======== ================ ==================================================
idx_data Name Description
======== ================ ==================================================
0 HomePageLeaders Top 5 players/teams by stat specified.
1 LeagueAverage League average of the stat specified.
2 LeagueMax League max of each stat column.
======== ================ ==================================================
"""
params = clean_locals(locals())
endpoint = "homepageleaders"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def other_leaders(
self,
idx_data,
league_id=enums.LeagueID.Default,
stat_type=enums.StatType.Default,
season=enums.Season.Default,
season_type=enums.SeasonType.Default,
player_or_team=enums.PlayerOrTeam.Default,
game_scope=enums.GameScope.Default,
player_scope=enums.PlayerScope.Default,
):
"""
Get top 5 players/teams by a particular stat type.
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param league_id: league to filter for.
:type league_id: nba.enums.LeagueID
:param stat_type: Stat to sort leaders table by.
:type stat_type: nba.enums.StatType
:param season: Season for which to get stat leaders.
:type season: nba.enums.Season
:param season_type: Regular Season or Playoffs.
:type season_type: nba.enums.SeasonType
:param player_or_team: whether to get individual players or by team.
:type player_or_team: nba.enums.PlayerOrTeam
:param game_scope: what games to include in the data.
:type game_scope: nba.enums.GameScope
:param player_scope: filter by rookies only or all players
:type player_scope: nba.enums.PlayerScope
:returns: top 5 players for given stat type, as defined below by idx_data.
:rtype: DataFrame
======== =============== ==================================================================
idx_data Name Description
======== =============== ==================================================================
0 HomePageStat1 Traditional=PTS, Advanced=OFF_RATING, Tracking=DIST_MILES
1 HomePageStat2 Traditional=REB, Advanced=DEF_RATING, Tracking=AST_POINTS_CREATED
2 HomePageStat3 Traditional=AST, Advanced=NET_RATING, Tracking=DRIVES
3 HomePageStat4 Traditional=STL, Advanced=PIE, Tracking=NUM_TOUCHES
4 HomePageStat5 Traditional=FG_PCT, Advanced=REB_PCT, Tracking=POST_TOUCHES
5 HomePageStat6 Traditional=FT_PCT, Advanced=AST_PCT, Tracking=REB_CONTEST
6 HomePageStat7 Traditional=FG3_PCT, Advanced=TS_PCT, Tracking=CATCH_SHOOT_PTS
7 HomePageStat8 Traditional=BLK, Advanced=EFG_PCT, Tracking=PULL_UP_PTS
======== =============== ==================================================================
"""
params = clean_locals(locals())
endpoint = "homepagev2"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def leaders_tiles(
self,
idx_data,
league_id=enums.LeagueID.Default,
stat=enums.Stat.Default,
season=enums.Season.Default,
season_type=enums.SeasonType.Default,
player_or_team=enums.PlayerOrTeam.Default,
game_scope=enums.GameScope.Default,
player_scope=enums.PlayerScope.Default,
):
"""
Get top 5 players/teams by a particular stat.
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param league_id: league to filter for.
:type league_id: nba.enums.LeagueID
:param stat: Stat to sort leaders table by.
:type stat: nba.enums.Stat
:param season: Season for which to get stat leaders.
:type season: nba.enums.Season
:param season_type: Regular Season or Playoffs.
:type season_type: nba.enums.SeasonType
:param player_or_team: whether to get individual players or by team.
:type player_or_team: nba.enums.PlayerOrTeam
:param game_scope: what games to include in the data.
:type game_scope: nba.enums.GameScope
:param player_scope: filter by rookies only or all players
:type player_scope: nba.enums.PlayerScope
:returns: data for specified filters, as defined below by idx_data.
:rtype: DataFrame
======== ================== ====================================================
idx_data Name Description
======== ================== ====================================================
0 LeadersTiles Top 5 players/teams by stat specified with id info.
1 AllTimeSeasonHigh Details of the all time high of the stat specified.
2 LastSeasonHigh Details of prior seasons high of stat specified.
3 LastSeasonHigh Details of prior seasons low of stat specified.
======== ================== ====================================================
"""
params = clean_locals(locals())
endpoint = "leaderstiles"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSet")
return df
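# Usage sketch (illustrative, not part of the original module). The wrapper
# object name and its construction below are assumptions; instantiate the
# endpoint class however this package normally does.
#
#   api = Homepage()                               # hypothetical instance of this class
#   pts_leaders = api.other_leaders(idx_data=0)    # HomePageStat1 (Traditional=PTS)
#   tiles = api.leaders_tiles(idx_data=0)          # LeadersTiles result set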
|
{
"content_hash": "967bdca0c07be7b5a6576500f14d422c",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 103,
"avg_line_length": 47.670967741935485,
"alnum_prop": 0.5509541209906618,
"repo_name": "rozzac90/nba",
"id": "501800f1558b64dfc66189845b605ce456ed2688",
"size": "7389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nba/endpoints/homepage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "342311"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
import os
import re
import phrasedml
try:
import tesedml as libsedml
except ImportError:
import libsedml
class phrasedmlImporter(object):
    def __init__(self, sbml_map=None):
        """ Constructor. """
        self.sedml_str = None
        self.sedml_path = None
        # Use None as the default to avoid sharing one mutable dict between instances.
        self.sbml_map = sbml_map if sbml_map is not None else {}
@classmethod
    def fromContent(cls, sedml_str, sbml_map=None):
# FIXME: bad hack for https://github.com/fbergmann/libSEDML/issues/47
# test for JWS quirks
if 'xmlns="http://sed-ml.org/sed-ml/level1/version3"' in sedml_str:
# import xml.etree.ElementTree as ElementTree
# root = ElementTree.fromstring(sedml_str)
# for p in root.findall('{http://sed-ml.org/sed-ml/level1/version3}plot2D'):
# if not 'logX' in p.attrib or not 'logY' in p.attrib:
# logX = False
# logY = False
# for l in p.findall('{http://sed-ml.org/sed-ml/level1/version3}listOfCurves'):
# for c in l.findall('{http://sed-ml.org/sed-ml/level1/version3}curve'):
# if 'logX' in c.attrib and c.attrib['logX'].lower() == 'true':
# logX = True
# if 'logY' in c.attrib and c.attrib['logY'].lower() == 'true':
# logY = True
# p.set('logX', logX)
# p.set('logY', logY)
# sedml_str = (ElementTree.tostring(root, encoding='utf8', method='xml')).decode('utf8')
while True:
p = sedml_str.find('plot2D')
if p < 0:
break
b = sedml_str.find('>', p)
if b < 0:
break
l = sedml_str.find('logX', p)
if l < 0 or b < l:
sedml_str = sedml_str[:p] + 'plot2D logX="false" logY="false" ' + sedml_str[p+len('plot2D'):]
else:
break
print(sedml_str)
importer = phrasedmlImporter(sbml_map)
importer.sedml_str = sedml_str
# test for errors
result = importer.toPhrasedml()
if result is None:
# get errors from libsedml
doc = libsedml.SedReader().readSedMLFromString(sedml_str)
if doc.getNumErrors():
max_len = 100
message = doc.getError(doc.getNumErrors()-1).getMessage()
message = message[:max_len] + '...' if len(message) > max_len else message
raise RuntimeError('Errors reading SED-ML: {}'.format(message))
else:
raise RuntimeError('Unable to read SED-ML.')
return importer
def isInRootDir(self, file):
d = os.path.split(file)[0]
return d == '' or d == '.'
def removeFileExt(self, filename):
return os.path.splitext(filename)[0]
def formatResource(self, filename):
""" Normalizes and also strips xml extension."""
return self.removeFileExt(os.path.normpath(filename))
def fixModelRefs(self, phrasedml_str):
''' Changes all references of type myModel.xml to myModel.'''
model_ref = re.compile(r'^.*\s*model\s*"([^"]*)"\s*$')
out_str = ''
for line in phrasedml_str.splitlines():
match = model_ref.match(line)
if match:
filename = match.group(1)
if self.isInRootDir(filename):
line = line.replace(filename,self.formatResource(filename))
out_str += line+'\n'
return out_str
def toPhrasedml(self):
# assign sbml resources
# print('toPhrasedml sbml resources:')
phrasedml.clearReferencedSBML()
for sbml_resource in self.sbml_map:
# print(' {} -> {}'.format(sbml_resource, self.sbml_map[sbml_resource][:30]))
phrasedml.setReferencedSBML(sbml_resource, self.sbml_map[sbml_resource])
# convert to phrasedml
if self.sedml_str:
result = phrasedml.convertString(self.sedml_str)
if result is None:
raise RuntimeError(phrasedml.getLastError())
return self.fixModelRefs(phrasedml.getLastPhraSEDML())
elif self.sedml_path:
            result = phrasedml.convertFile(self.sedml_path)
if result is None:
raise RuntimeError(phrasedml.getLastError())
return self.fixModelRefs(phrasedml.getLastPhraSEDML())
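# Usage sketch (illustrative, not part of the original module). Assumes
# `sedml_content` holds a SED-ML document as a string and `sbml_map` maps
# model source names to SBML strings.
#
#   importer = phrasedmlImporter.fromContent(sedml_content, sbml_map)
#   phrasedml_repr = importer.toPhrasedml()
#   print(phrasedml_repr)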
|
{
"content_hash": "31940bb7fcdf92061e253a6c3406a189",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 113,
"avg_line_length": 39.99130434782609,
"alnum_prop": 0.5412046096977604,
"repo_name": "kirichoi/tellurium",
"id": "9dff5327f613bf80f46794bbe70f30bd60897414",
"size": "4599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tellurium/teconverters/convert_phrasedml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4905"
},
{
"name": "Inno Setup",
"bytes": "25153"
},
{
"name": "Jupyter Notebook",
"bytes": "10767"
},
{
"name": "Python",
"bytes": "1120769"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
}
|
"""\
Minimum spanning tree by Kruskal
jill-jenn vie and christoph durr - 2014-2018
"""
from math import sqrt
import random
# snip{ union-find
class UnionFind:
"""Maintains a partition of {0, ..., n-1}
"""
def __init__(self, n):
self.up_bound = list(range(n))
self.rank = [0] * n
def find(self, x_index):
"""
:returns: identifier of part containing x_index
        :complexity: O(inverse_ackerman(n))
"""
if self.up_bound[x_index] == x_index:
return x_index
self.up_bound[x_index] = self.find(self.up_bound[x_index])
return self.up_bound[x_index]
def union(self, x_index, y_index):
"""
        Merges the part containing x_index and the part containing y_index
:returns: False if x_index, y_index are already in same part
:complexity: O(inverse_ackerman(n))
"""
repr_x = self.find(x_index)
repr_y = self.find(y_index)
if repr_x == repr_y: # already in the same component
return False
if self.rank[repr_x] == self.rank[repr_y]:
self.rank[repr_x] += 1
self.up_bound[repr_y] = repr_x
elif self.rank[repr_x] > self.rank[repr_y]:
self.up_bound[repr_y] = repr_x
else:
self.up_bound[repr_x] = repr_y
return True
# snip}
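# Illustrative example (not from the original module): three unions over five
# elements leave two parts, {0, 1, 2} and {3, 4}.
#
#   uf = UnionFind(5)
#   uf.union(0, 1)               # True, parts merged
#   uf.union(1, 2)               # True, parts merged
#   uf.union(0, 2)               # False, already in the same part
#   uf.find(0) == uf.find(2)     # True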
# snip{ kruskal
# pylint: disable=redefined-outer-name, unused-variable
def kruskal(graph, weight):
"""Minimum spanning tree by Kruskal
:param graph: undirected graph in listlist or listdict format
:param weight: in matrix format or same listdict graph
:returns: list of edges of the tree
:complexity: ``O(|E|log|E|)``
"""
u_f = UnionFind(len(graph))
edges = []
for u, _ in enumerate(graph):
for v in graph[u]:
edges.append((weight[u][v], u, v))
edges.sort()
min_span_tree = []
for w_idx, u_idx, v_idx in edges:
if u_f.union(u_idx, v_idx):
min_span_tree.append((u_idx, v_idx))
return min_span_tree
# snip}
def dist(a, b):
"""
distance between point a and point b
"""
return sqrt(sum([(a[i] - b[i]) * (a[i] - b[i])
for i in range(len(a))]))
# pylint: disable=pointless-string-statement
if __name__ == "__main__":
"""
main function
"""
N = 256
points = [[random.random() * 5, random.random() * 5] for _ in range(N)]
weight = [[dist(points[i], points[j]) for j in range(N)]
for i in range(N)]
graph = [[j for j in range(N) if i != j] for i in range(N)]
with open('../data/kruskal-points.tex', 'w') as infile:
min_span_tree = kruskal(graph, weight)
val = 0
for u_idx, v_idx in min_span_tree:
val += weight[u_idx][v_idx]
infile.write('\\draw[blue] (%f, %f) -- (%f, %f);\n'
% tuple(points[u_idx] + points[v_idx]))
for point in points:
infile.write('\\filldraw[black] (%f, %f) circle (1pt);\n'
% tuple(point))
print(val)
|
{
"content_hash": "c4ab27c5cb17851b544581b4d28c8be7",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 75,
"avg_line_length": 29.883495145631066,
"alnum_prop": 0.5425601039636128,
"repo_name": "jilljenn/tryalgo",
"id": "61c5cc198d20671c0f4535499ea8846440ef6b32",
"size": "3125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tryalgo/kruskal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "565"
},
{
"name": "Python",
"bytes": "287432"
}
],
"symlink_target": ""
}
|
"""DAL's Select2 and django-tagging extension."""
|
{
"content_hash": "e1c7acba71455ebfeb68a22b9e9b431c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 49,
"avg_line_length": 50,
"alnum_prop": 0.72,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "528680999be3571117414a997b931bdc97d1bbab",
"size": "50",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/dal_select2_tagging/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from datacheck.core import (validate, Validator, Type, List,
Required, Optional, Dict)
__all__ = [
'validate',
'Validator',
'Type',
'List',
'Required',
'Optional',
'Dict',
]
|
{
"content_hash": "3c189d19a68f8ffb8242226ba9b7f637",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 21.470588235294116,
"alnum_prop": 0.547945205479452,
"repo_name": "csdev/datacheck",
"id": "ff9a497107590b6ec2b32983a87544d5423effd7",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datacheck/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31168"
},
{
"name": "Shell",
"bytes": "647"
}
],
"symlink_target": ""
}
|
"""
Created on July 2017
@author: JulienWuthrich
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_absolute_error, accuracy_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import ElasticNetCV, LassoLarsCV, RidgeCV
from sklearn.naive_bayes import BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor, XGBClassifier
from vecstack import stacking
# Read the csv file
data = pd.read_csv("toto2.csv")
regression = True
if regression:
metric = r2_score
else:
metric = accuracy_score
# Split the dependent and independent variables
y = data[["predict"]]
X = data.drop("predict", axis=1)
# Split into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# Apply polynomial feature expansion
poly_reg = PolynomialFeatures(degree=1)
# Transform into numpy arrays
x_train = poly_reg.fit_transform(X_train)
x_test = poly_reg.fit_transform(X_test)
y_test = np.array(y_test.iloc[:, 0])
y_train = np.array(y_train.iloc[:, 0])
# Define the level 1 models
lmodels = [
    LassoLarsCV(copy_X=True, cv=None, eps=2.2204460492503131e-16,
                fit_intercept=True, max_iter=500, max_n_alphas=1000, n_jobs=1,
                normalize=True, positive=False, precompute='auto', verbose=False),
    RidgeCV(alphas=(0.1, 1.0, 10.0), cv=None, fit_intercept=True, gcv_mode=None,
            normalize=False, scoring=None, store_cv_values=False),
    XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=1, gamma=0,
                 learning_rate=0.1, max_delta_step=0, max_depth=8,
                 min_child_weight=6, missing=None, n_estimators=75, nthread=-1,
                 objective='reg:linear', reg_alpha=0, reg_lambda=1,
                 scale_pos_weight=1, seed=0, silent=True, subsample=0.8),
]
# Build the level 1 stack
S_train, S_test = stacking(
lmodels, x_train, y_train, x_test,
regression=regression, metric=metric,
n_folds=3, shuffle=True, random_state=0, verbose=1
)
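# Under vecstack's default settings, S_train holds the out-of-fold predictions
# of each level 1 model on the training set and S_test holds their predictions
# on the test set, averaged across the folds.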
# Build the level 2 model
model = DecisionTreeRegressor(criterion='mse', max_depth=10, max_features=None,
max_leaf_nodes=None, min_impurity_split=1e-07,
min_samples_leaf=4, min_samples_split=4,
min_weight_fraction_leaf=0.0, presort=False, random_state=None,
splitter='best')
# Fit the model
model.fit(S_train, y_train)
# Predict
y_pred = model.predict(S_test)
# Scoring
if regression:
print('Score on test set:', mean_absolute_error(y_test, y_pred))
else:
print('Score on test set:', accuracy_score(y_test, y_pred))
print(metric(y_test, y_pred))
|
{
"content_hash": "728b44781a917250f255dbf3cd4490a1",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 149,
"avg_line_length": 34.88505747126437,
"alnum_prop": 0.742998352553542,
"repo_name": "Jwuthri/Mozinor",
"id": "618403ce157025beff23f5dc15817c9a743b2bc1",
"size": "3060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mozinor/example/toto2_stack_model_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "220512"
},
{
"name": "Python",
"bytes": "78281"
}
],
"symlink_target": ""
}
|
import pbr.version
PROJECT_NAME = __package__
__version__ = pbr.version.VersionInfo(PROJECT_NAME).version_string()
__all__ = ['__version__', 'PROJECT_NAME']
|
{
"content_hash": "d80a0d111cfdb97d1d9ad84de3de7fe1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 68,
"avg_line_length": 26.5,
"alnum_prop": 0.6729559748427673,
"repo_name": "shad7/tvrenamer",
"id": "b511ea3800823c8a5c2f0b04180176e9397edc1b",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tvrenamer/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132376"
}
],
"symlink_target": ""
}
|