text
stringlengths 4
1.02M
| meta
dict |
|---|---|
# Minimal pybulletphysics demo: drop two dynamic boxes onto a static ground
# plane and report contact manifolds from an internal-tick callback (Python 2).
from bulletphysics import *

# Standard Bullet world wiring: broadphase, collision config/dispatcher,
# constraint solver, then the dynamics world itself.
broadphase = DbvtBroadphase()
collisionConfiguration = DefaultCollisionConfiguration()
dispatcher = CollisionDispatcher(collisionConfiguration)
solver = SequentialImpulseConstraintSolver()
world = DiscreteDynamicsWorld(dispatcher, broadphase, solver,collisionConfiguration)
world.setGravity( Vector3(0, -9.81, 0) )

# Collision shapes: an infinite plane with normal +Y (plane constant 1), and
# two identical boxes with half-extents 10x10x10.
ground = StaticPlaneShape(Vector3(0,1,0),1)
box0 = BoxShape(Vector3(10, 10, 10));
box1 = BoxShape(Vector3(10, 10, 10));

# Ground body: mass 0 makes it static; zero inertia for a static body.
ground_motionstate = DefaultMotionState( Transform(Quaternion(0,0,0,1), Vector3(0,-10,0)) )
ground_rigidbody_info = RigidBodyConstructionInfo(0, ground_motionstate, ground, Vector3(0,0,0))
ground_rigidbody = RigidBody(ground_rigidbody_info)
world.addRigidBody(ground_rigidbody)
# Tag bodies with a name so the tick callback can identify them.
ground_rigidbody.name = "ground"

# Both boxes start at the same spot, 100 units above the origin.
box0_motionstate = DefaultMotionState( Transform(Quaternion(0,0,0,1), Vector3(0, 100,0)) )
box1_motionstate = DefaultMotionState( Transform(Quaternion(0,0,0,1), Vector3(0, 100,0)) )
box_mass = 100
local_inertia = Vector3(0,0,0)
# Inertia is computed once from box0 and reused for box1 -- acceptable here
# because the two shapes are identical.
box0.calculateLocalInertia(box_mass, local_inertia)
box0_rigidbody_info = RigidBodyConstructionInfo(box_mass, box0_motionstate, box0, local_inertia)
box1_rigidbody_info = RigidBodyConstructionInfo(box_mass, box1_motionstate, box1, local_inertia)
box0_rigidbody = RigidBody(box0_rigidbody_info)
box0_rigidbody.name = "box0"
print box0_rigidbody.name
box1_rigidbody = RigidBody(box1_rigidbody_info)
box1_rigidbody.name = "box1"
print box1_rigidbody.name
world.addRigidBody(box0_rigidbody)
world.addRigidBody(box1_rigidbody)

# Reusable transform buffers for reading back positions each frame.
box0_trans = Transform()
box1_trans = Transform()

def cb(world, ts):
    # Internal-tick callback: print the body names of every contact manifold.
    nm = world.getDispatcher().getNumManifolds()
    print "cb", nm
    for i in range(0, nm):
        manifold = world.getDispatcher().getManifoldByIndexInternal(i)
        print "body0", manifold.getBody0().name
        print "body1", manifold.getBody1().name
    # NOTE(review): under Python 2 this prints a tuple, unlike the print
    # statements above -- presumably unintentional but harmless for a demo.
    print("hello", ts, world.getWorldUserInfo())

# The second argument (17) is presumably the world user info echoed back by
# getWorldUserInfo() in the callback -- confirm against the binding's docs.
world.setInternalTickCallback(cb, 17)

# Step the simulation for ~5 seconds at 60 Hz and print both boxes' heights.
for i in range(0, 300):
    world.stepSimulation(1/60.0, 10)
    box0_motionstate.getWorldTransform(box0_trans)
    box1_motionstate.getWorldTransform(box1_trans)
    print box0_trans.getOrigin().getY()
    print box1_trans.getOrigin().getY()

world.removeRigidBody(box0_rigidbody)
world.removeRigidBody(box1_rigidbody)
world.removeRigidBody(ground_rigidbody)
|
{
"content_hash": "008382e03fcf374b9d0cb159640c13fa",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 96,
"avg_line_length": 34.59701492537314,
"alnum_prop": 0.7661777394305436,
"repo_name": "20tab/pybulletphysics",
"id": "46fe3b098574588649b77ffd0258e00c7f5316a5",
"size": "2318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/collision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4347"
},
{
"name": "C++",
"bytes": "89643"
},
{
"name": "Python",
"bytes": "19169"
}
],
"symlink_target": ""
}
|
from .. import Provider as BankProvider
class Provider(BankProvider):
    """Implement bank provider for ``es_ES`` locale."""

    # Spanish IBANs: country code "ES" plus a 20-digit BBAN; each '#' is
    # replaced with a random digit by the base provider.
    bban_format = "####################"
    country_code = "ES"
|
{
"content_hash": "13e4ecf3671c7ed9827a6bd5e991fc43",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 24.25,
"alnum_prop": 0.5876288659793815,
"repo_name": "joke2k/faker",
"id": "16622baf35fbd9f247fee1c7209380c36a1889b4",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faker/providers/bank/es_ES/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "291"
},
{
"name": "Python",
"bytes": "7689013"
},
{
"name": "Shell",
"bytes": "880"
}
],
"symlink_target": ""
}
|
"""Abstract classes for simulations which keep track of state vector."""
import abc
from typing import Any, Dict, Sequence, TYPE_CHECKING, Tuple, Generic, TypeVar
import numpy as np
from cirq import ops, study, value
from cirq.sim import simulator, state_vector
if TYPE_CHECKING:
import cirq
TStateVectorStepResult = TypeVar('TStateVectorStepResult', bound='StateVectorStepResult')
class SimulatesIntermediateStateVector(
    Generic[TStateVectorStepResult],
    simulator.SimulatesAmplitudes,
    simulator.SimulatesIntermediateState[
        TStateVectorStepResult, 'StateVectorTrialResult', 'StateVectorSimulatorState'
    ],
    metaclass=abc.ABCMeta,
):
    """A simulator that can access its state vector while simulating.

    Subclasses are expected to implement the _base_iterator method.
    """

    def _create_simulator_trial_result(
        self,
        params: study.ParamResolver,
        measurements: Dict[str, np.ndarray],
        final_simulator_state: 'StateVectorSimulatorState',
    ) -> 'StateVectorTrialResult':
        # Package one parameter sweep's outcome into a trial result.
        return StateVectorTrialResult(
            params=params,
            measurements=measurements,
            final_simulator_state=final_simulator_state,
        )

    def compute_amplitudes_sweep(
        self,
        program: 'cirq.Circuit',
        bitstrings: Sequence[int],
        params: study.Sweepable,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
    ) -> Sequence[Sequence[complex]]:
        """Pick the requested amplitudes out of each sweep's final state vector."""
        if isinstance(bitstrings, np.ndarray) and bitstrings.ndim > 1:
            raise ValueError(
                'The list of bitstrings must be input as a '
                '1-dimensional array of ints. Got an array with '
                f'shape {bitstrings.shape}.'
            )

        # A tuple would be treated as a multi-dimensional index instead of
        # advanced (fancy) indexing along axis 0, so convert it to a list.
        # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
        index = list(bitstrings) if isinstance(bitstrings, tuple) else bitstrings
        return [
            trial.final_state_vector[index]
            for trial in self.simulate_sweep(program, params, qubit_order)
        ]
class StateVectorStepResult(
    simulator.StepResult['StateVectorSimulatorState'], metaclass=abc.ABCMeta
):
    """Abstract `StepResult` whose simulator state is a `StateVectorSimulatorState`."""

    @abc.abstractmethod
    def _simulator_state(self) -> 'StateVectorSimulatorState':
        """Returns the simulator_state of the simulator after this step.

        The form of the simulator_state depends on the implementation of the
        simulation; see documentation for the implementing class for details.
        """
        raise NotImplementedError()
@value.value_equality(unhashable=True)
class StateVectorSimulatorState:
    """Simulator state for state-vector simulations: the vector plus its qubit map."""

    def __init__(self, state_vector: np.ndarray, qubit_map: Dict[ops.Qid, int]) -> None:
        self.state_vector = state_vector
        self.qubit_map = qubit_map
        # Pre-compute the qid shape implied by the qubit map.
        self._qid_shape = simulator._qubit_map_to_shape(qubit_map)

    def _qid_shape_(self) -> Tuple[int, ...]:
        return self._qid_shape

    def __repr__(self) -> str:
        return (
            f'cirq.StateVectorSimulatorState(state_vector=np.{self.state_vector!r}, '
            f'qubit_map={self.qubit_map!r})'
        )

    def _value_equality_values_(self) -> Any:
        # Lists (not ndarrays) so that value equality compares element-wise.
        return self.state_vector.tolist(), self.qubit_map
@value.value_equality(unhashable=True)
class StateVectorTrialResult(state_vector.StateVectorMixin, simulator.SimulationTrialResult):
    """A `SimulationTrialResult` that includes the `StateVectorMixin` methods.

    Attributes:
        final_state_vector: The final state vector for the system.
    """

    def __init__(
        self,
        params: study.ParamResolver,
        measurements: Dict[str, np.ndarray],
        final_simulator_state: StateVectorSimulatorState,
    ) -> None:
        super().__init__(
            params=params,
            measurements=measurements,
            final_simulator_state=final_simulator_state,
            qubit_map=final_simulator_state.qubit_map,
        )
        # Expose the final state vector directly as an attribute.
        self.final_state_vector = final_simulator_state.state_vector

    def state_vector(self):
        """Return a copy of the state vector at the end of the computation.

        The state is given in the computational basis defined by the
        qubit_map: the value stored for a qubit is its index, and indices are
        read big-endian -- the last qubit supplies the 1s bit of a basis-state
        index, the second-to-last the 2s bit, and so forth.

        Example:
            qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}

            Then the returned vector will have indices mapped to qubit basis
            states like the following table

                |     | QubitA | QubitB | QubitC |
                | :-: | :----: | :----: | :----: |
                |  0  |   0    |   0    |   0    |
                |  1  |   0    |   0    |   1    |
                |  2  |   0    |   1    |   0    |
                |  3  |   0    |   1    |   1    |
                |  4  |   1    |   0    |   0    |
                |  5  |   1    |   0    |   1    |
                |  6  |   1    |   1    |   0    |
                |  7  |   1    |   1    |   1    |
        """
        return self._final_simulator_state.state_vector.copy()

    def _value_equality_values_(self):
        # Sort keys and convert arrays to lists so equality does not depend on
        # dict ordering or ndarray identity.
        sorted_measurements = {
            key: arr.tolist() for key, arr in sorted(self.measurements.items())
        }
        return self.params, sorted_measurements, self._final_simulator_state

    def __str__(self) -> str:
        samples = super().__str__()
        final = self.state_vector()
        # Compact Dirac notation for sparse vectors; raw array text otherwise.
        significant = sum(1 for amplitude in final if abs(amplitude) > 0.001)
        if significant < 16:
            vector_text = self.dirac_notation(3)
        else:
            vector_text = str(final)
        return f'measurements: {samples}\noutput vector: {vector_text}'

    def _repr_pretty_(self, p: Any, cycle: bool) -> None:
        """Text output in Jupyter."""
        if cycle:
            # There should never be a cycle. This is just in case.
            p.text('StateVectorTrialResult(...)')
            return
        p.text(str(self))

    def __repr__(self) -> str:
        return (
            f'cirq.StateVectorTrialResult(params={self.params!r}, '
            f'measurements={self.measurements!r}, '
            f'final_simulator_state={self._final_simulator_state!r})'
        )
|
{
"content_hash": "7184a3467286c59d66656d59a5ab5328",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 97,
"avg_line_length": 36.24175824175824,
"alnum_prop": 0.6035476046088538,
"repo_name": "balopat/Cirq",
"id": "4e6e013833ea279547a1b9e9aa9b015eb0e67f4b",
"size": "7180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/sim/state_vector_simulator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5923"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "Jupyter Notebook",
"bytes": "23905"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6256825"
},
{
"name": "Shell",
"bytes": "50383"
},
{
"name": "Starlark",
"bytes": "5979"
}
],
"symlink_target": ""
}
|
"""Multitask models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_layer
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_model
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
class SharedEncoderModel(base_model.MultiTaskModel):
  """Multitask model that shares encoder between tasks."""

  @classmethod
  def Params(cls):
    p = super(SharedEncoderModel, cls).Params()
    p.Define('encoder_to_share', None,
             'The task name whose encoder should be shared.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(SharedEncoderModel, self).__init__(params)
    p = self.params
    assert p.encoder_to_share in self.task_names
    # Every Task creates its variables in __init__ and only wires them
    # together later in FProp, so at this point it is safe to re-point each
    # other task's 'encoder' child at the designated task's encoder; FProp
    # will then connect everything correctly.
    shared_encoder = self.GetTask(p.encoder_to_share).encoder
    other_tasks = (self.GetTask(name) for name in self.task_names
                   if name != p.encoder_to_share)
    for task in other_tasks:
      assert 'encoder' not in task.children
      task.AddChild('encoder', shared_encoder)
class SharedEncoderDecoderModel(base_model.MultiTaskModel):
  """Multitask model that shares both encoder and decoder between tasks."""

  @classmethod
  def Params(cls):
    p = super(SharedEncoderDecoderModel, cls).Params()
    p.Define('encoder_to_share', None,
             'The task name whose encoder should be shared.')
    p.Define('decoder_to_share', None,
             'The task name whose decoder should be shared.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(SharedEncoderDecoderModel, self).__init__(params)
    p = self.params
    assert p.encoder_to_share in self.task_names
    assert p.decoder_to_share in self.task_names
    # Re-point every other task's encoder/decoder child at the designated
    # task's module.  Variables are only wired together later in FProp, so
    # swapping the child references here is safe.
    shared_children = (
        ('encoder', p.encoder_to_share, self.GetTask(p.encoder_to_share).encoder),
        ('decoder', p.decoder_to_share, self.GetTask(p.decoder_to_share).decoder),
    )
    for name in self.task_names:
      for child_name, owner, module in shared_children:
        if name != owner:
          task = self.GetTask(name)
          assert child_name not in task.children
          task.AddChild(child_name, module)
class RegExSharedVariableModel(base_model.MultiTaskModel):
  """Multitask models that share variables across different tasks.

  Note, do NOT use this model unless you know exactly what you are trying to do
  and you have verified that it indeed achieves what you would have expected.
  """

  @classmethod
  def Params(cls):
    p = super(RegExSharedVariableModel, cls).Params()
    p.Define(
        'variable_renaming_rules', None,
        'A list/tuple of variable renaming rules. Each element in the'
        ' list is a pair of strings. The first element is a regex'
        ' expression while the second element is a python format string.')
    return p

  @base_layer.initializer
  def __init__(self, params):
    # Enable variable sharing.
    # Construct all tasks under a rename + opportunistic-reuse scope, so that
    # variables whose renamed names collide are presumably reused rather than
    # redefined -- this is what implements the cross-task sharing.
    p = params
    with py_utils.OpportunisticVariableReuseScope():
      with py_utils.VariableRenameScope(p.variable_renaming_rules):
        super(RegExSharedVariableModel, self).__init__(params)

  def ConstructFPropBPropGraph(self):
    # We need to override this since constructing the BPropGraph
    # creates slot variables (e.g. optimizer accumulators), which must be
    # created under the same rename/reuse scopes as the model variables.
    p = self._params
    with py_utils.OpportunisticVariableReuseScope():
      with py_utils.VariableRenameScope(p.variable_renaming_rules):
        super(RegExSharedVariableModel, self).ConstructFPropBPropGraph()
|
{
"content_hash": "f179de19945d09676a84e360a79131b2",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 100,
"avg_line_length": 38.72477064220183,
"alnum_prop": 0.7093105899076049,
"repo_name": "mlperf/training_results_v0.7",
"id": "58f425fb9dcbaf95c9602686f2722ee5dd8011f8",
"size": "4910",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v3-8192/lingvo/core/multitask_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import uniprot_client
from indra.util import unicode_strs
from nose.plugins.attrib import attr
@attr('webservice')
def test_query_protein_exists():
    # A valid UniProt accession should resolve to a non-None entry.
    g = uniprot_client.query_protein('P00533')
    assert g is not None


@attr('webservice')
def test_query_protein_nonexist():
    # An invalid accession yields None rather than raising.
    g = uniprot_client.query_protein('XXXX')
    assert g is None


@attr('webservice')
def test_query_protein_deprecated():
    # Q8NHX1 is a deprecated accession; queries should still resolve, and the
    # gene name should be retrievable both with and without web fallback.
    g = uniprot_client.query_protein('Q8NHX1')
    assert g is not None
    gene_name = uniprot_client.get_gene_name('Q8NHX1')
    assert gene_name == 'MAPK3'
    assert unicode_strs(gene_name)
    gene_name = uniprot_client.get_gene_name('Q8NHX1', web_fallback=False)
    assert gene_name == 'MAPK3'
    assert unicode_strs(gene_name)


@attr('webservice')
def test_get_family_members():
    # The RAF family should contain the three well-known members.
    members = uniprot_client.get_family_members('RAF')
    assert 'ARAF' in members
    assert 'BRAF' in members
    assert 'RAF1' in members
    assert unicode_strs(members)


def test_get_gene_name_human():
    # P00533 maps to the human EGFR gene symbol.
    gene_name = uniprot_client.get_gene_name('P00533')
    assert gene_name == 'EGFR'
    assert unicode_strs(gene_name)
'''
The below test can be used as a template
to test for entries missing from the resource
file that are available in the web service. It is only relevant if the
resource file is not up to date.
def test_get_gene_name_no_table_entry():
gene_name = uniprot_client.get_gene_name('P01814', web_fallback=True)
assert gene_name == 'IGHV2-70'
assert unicode_strs(gene_name)
gene_name = uniprot_client.get_gene_name('P01814', web_fallback=False)
assert gene_name is None
'''


def test_get_synonyms():
    # Gene and protein synonym lists should be non-empty, and the combined
    # synonym list should be exactly their union.
    upid = 'Q02750' # This is MAP2K1
    gene_synonyms = uniprot_client.get_gene_synonyms(upid)
    assert gene_synonyms, gene_synonyms
    assert 'MEK1' in gene_synonyms
    protein_synonyms = uniprot_client.get_protein_synonyms(upid)
    assert protein_synonyms, protein_synonyms
    assert 'MKK1' in protein_synonyms
    all_synonyms = uniprot_client.get_synonyms(upid)
    assert set(gene_synonyms + protein_synonyms) == set(all_synonyms)
    assert 'MAP2K1' in all_synonyms


def test_get_gene_name_nonhuman():
    # P31938 is a non-human (mouse) entry; note the sentence-case symbol.
    gene_name = uniprot_client.get_gene_name('P31938')
    assert gene_name == 'Map2k1'
    assert unicode_strs(gene_name)


def test_get_gene_name_unreviewed():
    # Unreviewed (TrEMBL) entries should still resolve from the local table.
    gene_name = uniprot_client.get_gene_name('X6RK18', web_fallback=False)
    assert gene_name == 'EXO5'
    assert unicode_strs(gene_name)


@attr('webservice')
def test_get_gene_name_no_gene_name():
    # P04434 has no gene name; both lookup modes should return None.
    gene_name = uniprot_client.get_gene_name('P04434', web_fallback=False)
    assert gene_name is None
    gene_name = uniprot_client.get_gene_name('P04434', web_fallback=True)
    assert gene_name is None


def test_get_gene_name_multiple_gene_names():
    # When an entry lists several gene names the first one is expected.
    gene_name = uniprot_client.get_gene_name('Q5VWM5')
    assert gene_name == 'PRAMEF9'


def test_is_human():
    assert uniprot_client.is_human('P00533')


def test_not_is_human():
    assert not uniprot_client.is_human('P31938')


def test_noentry_is_human():
    # Unknown accessions are simply not human (no exception raised).
    assert not uniprot_client.is_human('XXXX')
@attr('webservice')
def test_get_sequence():
    # Full-length EGFR sequence should be well over 1000 residues.
    seq = uniprot_client.get_sequence('P00533')
    assert len(seq) > 1000
    assert unicode_strs(seq)


@attr('webservice')
def test_get_modifications():
    # ERK1 (P27361) carries the canonical activation-loop phosphosites.
    mods = uniprot_client.get_modifications('P27361')
    assert ('Phosphothreonine', 202) in mods
    assert ('Phosphotyrosine', 204) in mods
    assert unicode_strs(mods)


@attr('webservice')
def test_verify_location():
    # Residue/position checks: correct residue passes, wrong residue and
    # out-of-range positions fail.
    assert uniprot_client.verify_location('P27361', 'T', 202)
    assert not uniprot_client.verify_location('P27361', 'S', 202)
    assert not uniprot_client.verify_location('P27361', 'T', -1)
    assert not uniprot_client.verify_location('P27361', 'T', 10000)


def test_get_mnemonic():
    mnemonic = uniprot_client.get_mnemonic('Q02750')
    assert mnemonic == 'MP2K1_HUMAN'
    assert unicode_strs(mnemonic)


def test_is_secondary_primary():
    # A primary accession is not flagged as secondary.
    assert not uniprot_client.is_secondary('Q02750')


def test_is_secondary_secondary():
    assert uniprot_client.is_secondary('Q96J62')


def test_get_primary_id_primary():
    # A primary ID maps to itself.
    assert uniprot_client.get_primary_id('Q02750') == 'Q02750'


def test_get_primary_id_secondary_hashuman():
    # A secondary ID with a human primary maps to that human entry.
    assert uniprot_client.get_primary_id('Q96J62') == 'P61978'


def test_get_primary_id_secondary_nohuman():
    # With no human primary, any of the non-human primaries is acceptable.
    assert uniprot_client.get_primary_id('P31848') in \
        ['P0A5M5', 'P9WIU6', 'P9WIU7']


def test_mouse_from_up():
    # Cross-reference lookups between UniProt, MGI and RGD, both directions.
    assert uniprot_client.get_mgi_id('P28028') == '88190'


def test_up_from_mouse():
    assert uniprot_client.get_id_from_mgi('88190') == 'P28028'


def test_rat_from_up():
    assert uniprot_client.get_rgd_id('O08773') == '620003'


def test_up_from_rat():
    assert uniprot_client.get_id_from_rgd('620003') == 'O08773'


def test_mouse_from_human():
    # Human BRAF (P15056) maps to the mouse ortholog.
    assert uniprot_client.get_mouse_id('P15056') == 'P28028'


def test_rat_from_human():
    # Human RAF1 (P04049) maps to the rat ortholog.
    assert uniprot_client.get_rat_id('P04049') == 'P11345'


def test_length():
    assert uniprot_client.get_length('P15056') == 766


@attr('webservice')
def test_get_function():
    fun = uniprot_client.get_function('P15056')
    assert fun.startswith('Protein kinase involved in the transduction')
|
{
"content_hash": "10b331b8db871963a3f1af6965ef10af",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 74,
"avg_line_length": 28.015706806282722,
"alnum_prop": 0.6991216595028966,
"repo_name": "pvtodorov/indra",
"id": "ab96a5e15b80fdc6e27b4da13c21fdfc3b90ebe0",
"size": "5351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indra/tests/test_uniprot_client.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "169"
},
{
"name": "HTML",
"bytes": "17236"
},
{
"name": "JavaScript",
"bytes": "72960"
},
{
"name": "Python",
"bytes": "2660313"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import argparse
import sys
import subprocess
import shutil
import threading
# from collections import defaultdict
# from subprocess import CalledProcessError
# make sure scripts/internal is on the pythonpath.
sys.path = [os.path.abspath(os.path.dirname(sys.argv[0])) + "/internal"] + sys.path
from prune_size_model import PruneSizeModel
# for ExitProgram and RunCommand
from pocolm_common import ExitProgram
from pocolm_common import RunCommand
from pocolm_common import GetCommandStdout
from pocolm_common import LogMessage
# Command-line interface.  Pruning is driven either by an explicit --steps
# string, or (if --target-num-ngrams > 0) by an automatic search for the
# threshold that yields the requested model size.
parser = argparse.ArgumentParser(description="This script takes an lm-dir, as produced by make_lm_dir.py, "
                                 "that should not have the counts split up into pieces, and it prunes "
                                 "the counts and writes out to a new lm-dir.",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)

parser.add_argument("--steps", type=str,
                    default='prune*0.25 EM EM EM prune*0.5 EM EM EM prune*1.0 EM EM EM prune*1.0 EM EM EM',
                    help='This string specifies a sequence of steps in the pruning sequence.'
                    'prune*X, with X <= 1.0, tells it to prune with X times the threshold '
                    'specified with the --final-threshold option. EM specifies one iteration of '
                    'E-M on the model. ')
parser.add_argument("--final-threshold", type=float,
                    help="Threshold for pruning, e.g. 0.5, 1.0, 2.0, 4.0.... "
                    "larger threshold will give you more highly-pruned models."
                    "Threshold is interpreted as entropy-change times overall "
                    "weighted data count, for each parameter. It should be "
                    "larger if you have more data, assuming you want the "
                    "same-sized models. "
                    "This is only relevant if --target-num-ngrams is not specified.")
parser.add_argument("--target-num-ngrams", type=int, default=0,
                    help="Target num-ngrams of final LM after pruning. "
                    "If setting this to a positive value, the --steps would be "
                    "ignored and a few steps may be worked out util the num-ngrams "
                    "of pruned LM match the target-num-ngrams.")
parser.add_argument("--target-lower-threshold", type=int,
                    help="lower tolerance of target num-ngrams. Default value is"
                    "5% relativly less than target num-ngrams. "
                    "This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--target-upper-threshold", type=int,
                    help="upper tolerance of target num-ngrams. Default value is"
                    "5% relativly larger than target num_ngrams. "
                    "This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--initial-threshold", type=float, default=0.25,
                    help="Initial threshold for the pruning steps starting from. "
                    "This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--max-iter", type=int, default=20,
                    help="Max iterations allowed to find the threshold for target-num-ngrams LM. "
                    "This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--verbose", type=str, default='false',
                    choices=['true', 'false'],
                    help="If true, print commands as we execute them.")
parser.add_argument("--cleanup", type=str, choices=['true', 'false'],
                    default='true', help='Set this to false to disable clean up of the '
                    'work directory.')
parser.add_argument("--remove-zeros", type=str, choices=['true', 'false'],
                    default='true', help='Set this to false to disable an optimization. '
                    'Only useful for debugging purposes.')
parser.add_argument("--check-exact-divergence", type=str, choices=['true', 'false'],
                    default='true', help='')
# --max-memory is passed straight through to sort's --buffer-size; its format
# is validated further below.
parser.add_argument("--max-memory", type=str, default='',
                    help="Memory limitation for sort.")
parser.add_argument("lm_dir_in",
                    help="Source directory, for the input language model.")
parser.add_argument("lm_dir_out",
                    help="Output directory where the language model is created.")

args = parser.parse_args()
# Add the script dir and the src dir to the path.
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
                      os.path.abspath(os.path.dirname(sys.argv[0])) + os.pathsep +
                      os.path.abspath(os.path.dirname(sys.argv[0])) + "/../src")

# Fail fast if the input directory is not a valid LM-dir.
if os.system("validate_lm_dir.py " + args.lm_dir_in) != 0:
    ExitProgram("failed to validate input LM-dir")
# verify the input string max_memory
if args.max_memory != '':
    # valid string max_memory must have at least two items
    if len(args.max_memory) >= 2:
        s = args.max_memory
        # valid string max_memory can be formatted as:
        # "a positive integer + a letter or a '%'" or "a positive integer"
        # the unit of memory size can also be 'T', 'P', 'E', 'Z', or 'Y'. They
        # are not included here considering their rare use in practice
        if s[-1] in ['b', 'B', '%', 'k', 'K', 'm', 'M', 'g', 'G'] or s[-1].isdigit():
            for x in s[:-1]:
                if not x.isdigit():
                    sys.exit("prune_lm_dir.py: --max-memory should be formatted as "
                             "'a positive integer' or 'a positive integer appended "
                             "with 'b', 'K', 'M','G', or '%''.")
            # max memory size must be larger than zero
            if int(s[:-1]) == 0:
                sys.exit("prune_lm_dir.py: --max-memory must be > 0 {unit}.".format(
                    unit=s[-1]))
        else:
            sys.exit("prune_lm_dir.py: the format of string --max-memory is not correct.")
    else:
        # BUGFIX: corrected "lenght" typo in the user-facing error message.
        sys.exit("prune_lm_dir.py: the length of string --max-memory must >= 2.")
    if args.max_memory[-1] == 'B':  # sort seems not recognize 'B'
        # BUGFIX: strings are immutable, so the original
        # "args.max_memory[-1] = 'b'" raised TypeError at runtime; rebuild
        # the string instead.
        args.max_memory = args.max_memory[:-1] + 'b'
# If the input LM-dir was built with its counts split into pieces, read how
# many pieces there are so the float.all.N files can be merged later.
num_splits = None
if os.path.exists(args.lm_dir_in + "/num_splits"):
    f = open(args.lm_dir_in + "/num_splits")
    num_splits = int(f.readline())
    f.close()

# All intermediate step directories live under the output dir.
work_dir = args.lm_dir_out + "/work"
# Work out the pruning schedule.  In --target-num-ngrams mode the step list is
# discovered iteratively later on (steps stays empty here); otherwise it comes
# directly from --steps.
if args.target_num_ngrams > 0:
    if args.target_lower_threshold is not None:
        if args.target_lower_threshold >= args.target_num_ngrams:
            ExitProgram("--target-lower-threshold[{0}] should be less than "
                        "--target-num-ngrams[{1}].".format(
                            args.target_lower_threshold, args.target_num_ngrams))
    else:
        # Default lower tolerance: 5% below the target.
        args.target_lower_threshold = int(0.95 * args.target_num_ngrams)
    if args.target_upper_threshold is not None:
        if args.target_upper_threshold <= args.target_num_ngrams:
            ExitProgram("--target-upper-threshold[{0}] should be larger than "
                        "--target-num-ngrams[{1}].".format(
                            args.target_upper_threshold, args.target_num_ngrams))
    else:
        # Default upper tolerance: 5% above the target.
        args.target_upper_threshold = int(1.05 * args.target_num_ngrams)
    if args.max_iter <= 1:
        ExitProgram("--max-iter must be bigger than 1, got: " + str(args.max_iter))
    steps = []
else:
    if args.final_threshold <= 0.0:
        ExitProgram("--final-threshold must be positive, got: " + str(args.final_threshold))
    steps = args.steps.split()
    if len(steps) == 0:
        ExitProgram("'steps' cannot be empty.")

# set the memory restriction for "sort"
sort_mem_opt = ''
if args.max_memory != '':
    sort_mem_opt = ("--buffer-size={0} ".format(args.max_memory))
# returns num-words in this lm-dir.
def GetNumWords(lm_dir_in):
    """Return the highest word-id, read from the last line of words.txt.

    Each line of words.txt is "<word> <id>"; the last line therefore carries
    the largest id, which is used as the word count.  Calls ExitProgram on
    unexpected output.
    """
    command = "tail -n 1 {0}/words.txt".format(lm_dir_in)
    line = subprocess.check_output(command, shell=True, universal_newlines=True)
    try:
        a = line.split()
        assert len(a) == 2
        ans = int(a[1])
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        ExitProgram("error: unexpected output '{0}' from command {1}".format(
            line, command))
    return ans
def GetNgramOrder(lm_dir_in):
    """Return the n-gram order read from lm_dir_in/ngram_order."""
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(lm_dir_in + "/ngram_order") as f:
        return int(f.readline())
def GetNumGrams(lm_dir_in):
    """Return (num_unigrams, tot_num_xgrams) from lm_dir_in/num_ngrams.

    Each line of num_ngrams is "<order> <count>".  "xgrams" refers to
    n-grams of order two and higher, i.e. num_ngrams - num_unigrams.
    """
    num_unigrams = 0
    tot_num_xgrams = 0
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(lm_dir_in + "/num_ngrams") as f:
        for order, line in enumerate(f):
            if order == 0:
                # First line is the unigram count.
                num_unigrams = int(line.split()[1])
            else:
                tot_num_xgrams += int(line.split()[1])
    return (num_unigrams, tot_num_xgrams)
# This script creates work/protected.all (listing protected
# counts which may not be removed); it requires work/float.all
# to exist.
def CreateProtectedCounts(work):
    # Pipeline: float counts -> histories -> byte-order sort (LC_ALL=C) ->
    # null counts.  sort_mem_opt (a global) caps sort's memory usage when
    # --max-memory was given.
    command = ("bash -c 'float-counts-to-histories <{0}/float.all | LC_ALL=C sort {1}|"
               " histories-to-null-counts >{0}/protected.all'".format(work, sort_mem_opt))
    log_file = work + "/log/create_protected_counts.log"
    RunCommand(command, log_file, args.verbose == 'true')
def SoftLink(src, dest):
    """Create (or replace) a symlink at dest pointing to abspath(src).

    Calls ExitProgram if the link cannot be created.
    """
    # lexists (not exists) so a dangling symlink at dest is also removed.
    if os.path.lexists(dest):
        os.remove(dest)
    try:
        os.symlink(os.path.abspath(src), dest)
    except OSError:
        # Narrowed from a bare 'except:': symlink failures surface as OSError.
        ExitProgram("error linking {0} to {1}".format(os.path.abspath(src), dest))
def CreateInitialWorkDir():
    # Creates float.all, stats.all, and protected.all in work_dir/step0,
    # starting from the input LM-dir.  Relies on the globals work_dir, args,
    # num_splits, num_words and ngram_order set at the top level.
    work0dir = work_dir + "/step0"
    # create float.all
    if not os.path.isdir(work0dir + "/log"):
        os.makedirs(work0dir + "/log")
    SoftLink(args.lm_dir_in + "/num_ngrams", work0dir + "/num_ngrams")
    if num_splits is None:
        # Unsplit input: link the single float.all straight in.
        SoftLink(args.lm_dir_in + "/float.all", work0dir + "/float.all")
    else:
        # Split input: merge float.all.1 .. float.all.N into one file.
        splits_star = ' '.join([args.lm_dir_in + "/float.all." + str(n)
                                for n in range(1, num_splits + 1)])
        command = "merge-float-counts " + splits_star + " >{0}/float.all".format(work0dir)
        log_file = work0dir + "/log/merge_initial_float_counts.log"
        RunCommand(command, log_file, args.verbose == 'true')
    # create protected.all
    CreateProtectedCounts(work0dir)
    stats_star = ' '.join(["{0}/stats.{1}".format(work0dir, n)
                           for n in range(1, ngram_order + 1)])
    # create stats.{1,2,3..}
    # e.g. command = 'float-counts-to-float-stats 20000 foo/work/step0/stats.1 '
    # 'foo/work/step0/stats.2 <foo/work/step0/float.all'
    command = ("float-counts-to-float-stats {0} ".format(num_words) +
               stats_star +
               " <{0}/float.all".format(work0dir))
    log_file = work0dir + "/log/float_counts_to_float_stats.log"
    RunCommand(command, log_file, args.verbose == 'true')
    # Merge the per-order stats files into a single stats.all, then delete
    # the per-order temporaries.
    command = "merge-float-counts {0} > {1}/stats.all".format(
        stats_star, work0dir)
    log_file = work0dir + "/log/merge_float_counts.log"
    RunCommand(command, log_file, args.verbose == 'true')
    for f in stats_star.split():
        os.remove(f)
# sets initial_logprob_per_word.
def GetInitialLogprob():
    # Runs one float-counts-estimate pass with /dev/null outputs purely to
    # read back the total count and total likelihood, and stores the
    # per-word logprob in the global initial_logprob_per_word.
    work0dir = work_dir + "/step0"
    float_star = ' '.join(['/dev/null' for n in range(1, ngram_order + 1)])
    command = ('float-counts-estimate {num_words} {work0dir}/float.all '
               '{work0dir}/stats.all {float_star} '.format(
                   num_words=num_words, work0dir=work0dir,
                   float_star=float_star))
    try:
        print(command, file=sys.stderr)
        p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, universal_newlines=True)
        # the stdout of this program will be something like:
        # 1.63388e+06 -7.39182e+06 10.5411 41.237 49.6758
        # representing: total-count, total-like, and for each order, the like-change
        # for that order.
        line = p.stdout.readline()
        print(line, file=sys.stderr)
        a = line.split()
        tot_count = float(a[0])
        tot_like = float(a[1])
        like_change = 0.0
        logprob_per_word = tot_like / tot_count
        for i in range(2, len(a)): # for each n-gram order
            like_change += float(a[i])
        like_change_per_word = like_change / tot_count
        # Sanity check: estimating without pruning should leave the
        # likelihood unchanged (exactly zero up to rounding).
        assert like_change_per_word < 0.0001 # should be exactly zero.
    except Exception as e:
        ExitProgram("error running command '{0}', error is '{1}'".format(
            command, repr(e)))
    global initial_logprob_per_word
    initial_logprob_per_word = logprob_per_word
def WriteNumNgrams(out_dir, num_ngrams):
    """Write per-order counts, one "<order> <count>" line each, to out_dir/num_ngrams.

    num_ngrams is a sequence of counts indexed by (order - 1).  Calls
    ExitProgram if the file cannot be written.
    """
    out_file = out_dir + "/num_ngrams"
    try:
        # 'with' guarantees the handle is closed even if a write fails
        # (the original could leak it on error).
        with open(out_file, "w") as f:
            for order, num in enumerate(num_ngrams):
                print(str(order + 1) + ' ' + str(num), file=f)
    except (IOError, OSError):
        # Narrowed from a bare 'except:'; file I/O failures are OSErrors.
        ExitProgram("error writing num-ngrams to: " + out_file)
def RunPruneStep(work_in, work_out, threshold):
    """Run one pruning step: prune float-counts below 'threshold'.

    Reads {work_in}/float.all and {work_in}/protected.all, writes the
    pruned counts into work_out, and updates the global
    'current_num_xgrams' from the pruning program's stdout.

    Returns the (negative) expected log-prob change per word caused by
    this pruning step.  Exits the program on any failure.
    """
    # set float_star = 'work_out/float.1 work_out/float.2 ...'
    float_star = " ".join(['{0}/float.{1}'.format(work_out, n)
                           for n in range(1, ngram_order + 1)])
    # create work_out/float.{1,2,..}
    log_file = work_out + '/log/float_counts_prune.log'
    command = ("float-counts-prune {threshold} {num_words} {work_in}/float.all "
               "{work_in}/protected.all {float_star} 2>>{log_file}".format(
                   threshold=threshold, num_words=num_words,
                   work_in=work_in, float_star=float_star, log_file=log_file))
    # Record the command at the top of its own log file (stderr of the
    # command is appended to the same file via the 2>> redirection).
    with open(log_file, 'w') as f:
        print("# " + command, file=f)
    try:
        print(command, file=sys.stderr)
        p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, universal_newlines=True)
        # First stdout line: '<word-count> <like-change>'.
        [word_count, like_change] = p.stdout.readline().split()
        like_change_per_word = float(like_change) / float(word_count)
        # Second line: totals of n-grams above the unigram level.
        [tot_xgrams, shadowed, protected, pruned] = p.stdout.readline().split()
        # Third line: per-order n-gram counts after pruning.
        num_ngrams = p.stdout.readline().split()
        assert p.stdout.readline() == ''
        ret = p.wait()
        assert ret == 0
        global current_num_xgrams
        current_num_xgrams = int(tot_xgrams) - int(pruned)
    except Exception as e:
        ExitProgram("error running command '{0}', error is '{1}'".format(
            command, repr(e)))
    WriteNumNgrams(work_out, num_ngrams)
    if args.remove_zeros == 'false':
        # create work_out/float.all.
        command = 'merge-float-counts {0} >{1}/float.all'.format(float_star,
                                                                 work_out)
        log_file = work_out + '/log/merge_float_counts.log'
        RunCommand(command, log_file, args.verbose == 'true')
        # The per-order files are no longer needed once merged.
        for f in float_star.split():
            os.remove(f)
        # soft-link work_out/stats.all to work_in/stats.all
        SoftLink(work_in + "/stats.all",
                 work_out + "/stats.all")
    else:
        # in this case we pipe the output of merge-float-counts into
        # float-counts-stats-remove-zeros.
        # set stats_star = 'work_out/stats.1 work_out/stats.2 ..'
        stats_star = " ".join(['{0}/stats.{1}'.format(work_out, n)
                               for n in range(1, ngram_order + 1)])
        command = ('merge-float-counts {float_star} | float-counts-stats-remove-zeros '
                   '{num_words} /dev/stdin {work_in}/stats.all {work_out}/float.all '
                   '{stats_star}'.format(
                       num_words=num_words, float_star=float_star,
                       work_in=work_in, work_out=work_out,
                       stats_star=stats_star))
        log_file = work_out + '/log/remove_zeros.log'
        RunCommand(command, log_file, args.verbose == 'true')
        # create work_out/stats.all
        command = 'merge-float-counts {0} >{1}/stats.all'.format(stats_star, work_out)
        log_file = work_out + '/log/merge_float_counts.log'
        RunCommand(command, log_file, args.verbose == 'true')
        # Both the per-order float and stats files have been merged.
        for f in float_star.split() + stats_star.split():
            os.remove(f)
    # create work_out/protected.all
    CreateProtectedCounts(work_out)
    return like_change_per_word
def RunEmStep(work_in, work_out):
    """Run one E-M re-estimation step on the float-counts.

    Re-estimates {work_in}/float.all using {work_in}/stats.all, merges
    the per-order outputs into {work_out}/float.all, and soft-links the
    unchanged auxiliary files from work_in into work_out.  Also updates
    the global 'final_logprob_per_word'.

    Returns the (positive) expected log-prob change per word from this
    E-M step.  Exits the program on any failure.
    """
    # set float_star = 'work_out/float.1 work_out/float.2 ...'
    float_star = " ".join(['{0}/float.{1}'.format(work_out, n)
                           for n in range(1, ngram_order + 1)])
    command = ('float-counts-estimate {num_words} {work_in}/float.all {work_in}/stats.all '
               '{float_star}'.format(num_words=num_words, work_in=work_in,
                                     float_star=float_star))
    log_file = work_out + "/log/float_counts_estimate.log"
    try:
        output = GetCommandStdout(command, log_file, args.verbose == 'true')
        # the stdout of this program will be something like:
        # 1.63388e+06 -7.39182e+06 10.5411 41.237 49.6758
        # representing: total-count, total-like, and for each order, the like-change
        # for that order.
        a = output.split()
        tot_count = float(a[0])
        tot_like = float(a[1])
        like_change = 0.0
        global final_logprob_per_word
        final_logprob_per_word = tot_like / tot_count
        for i in range(2, len(a)):  # for each n-gram order
            like_change += float(a[i])
        like_change_per_word = like_change / tot_count
    except Exception as e:
        ExitProgram("error running command '{0}', error is '{1}'".format(
            command, repr(e)))
    command = 'merge-float-counts {0} >{1}/float.all'.format(float_star, work_out)
    log_file = work_out + '/log/merge_float_counts.log'
    RunCommand(command, log_file, args.verbose == 'true')
    # The per-order files are no longer needed once merged.
    for f in float_star.split():
        os.remove(f)
    # soft-link work_out/stats.all to work_in/stats.all
    SoftLink(work_in + "/stats.all",
             work_out + "/stats.all")
    # soft-link work_out/protected.all to work_in/protected.all
    SoftLink(work_in + "/protected.all",
             work_out + "/protected.all")
    # num_ngrams is unchanged by E-M, so link it through as well.
    SoftLink(work_in + "/num_ngrams",
             work_out + "/num_ngrams")
    return like_change_per_word
# Runs one of the numbered steps.  step_number >= 0 is the number of the work
# directory we'll get the input from (the output will be that plus one).
# Returns the expected log-prob change (on data generated from the model
# itself); this will be negative for pruning steps and positive for E-M steps.
def RunStep(step_number, threshold, **kwargs):
    """Dispatch step 'step_number' of the global 'steps' list.

    A step text of 'prune*X' prunes with threshold*X; 'EM' runs one
    E-M pass.  The optional keyword 'in_step' overrides which step
    directory the input is read from (used when backtracking).
    """
    in_index = kwargs['in_step'] if 'in_step' in kwargs else step_number
    work_in = work_dir + "/step" + str(in_index)
    work_out = work_dir + "/step" + str(step_number + 1)
    log_dir = work_out + "/log"
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    step_text = steps[step_number]
    if step_text == 'EM':
        return RunEmStep(work_in, work_out)
    if step_text.startswith('prune*'):
        try:
            scale = float(step_text[6:])
            assert scale != 0.0
        except:
            ExitProgram("invalid step (wrong --steps "
                        "option): '{0}'".format(step_text))
        return RunPruneStep(work_in, work_out, threshold * scale)
    ExitProgram("invalid step (wrong --steps "
                "option): '{0}'".format(step_text))
def FinalizeOutput(final_work_out):
    """Install the results of the final step into args.lm_dir_out.

    Moves float.all, copies num_ngrams and the metadata files from the
    input LM dir, records that the LM was pruned, and removes any stale
    'num_splits' marker.  Exits the program on any file error.
    """
    try:
        shutil.move(final_work_out + "/float.all",
                    args.lm_dir_out + "/float.all")
    except:
        ExitProgram("error moving {0}/float.all to {1}/float.all".format(
            final_work_out, args.lm_dir_out))
    try:
        shutil.copy(final_work_out + "/num_ngrams",
                    args.lm_dir_out + "/num_ngrams")
    except:
        ExitProgram("error copying {0}/num_ngrams to {1}/num_ngrams".format(
            final_work_out, args.lm_dir_out))
    # Mark the output directory as containing a pruned LM.
    f = open(args.lm_dir_out + "/was_pruned", "w")
    print("true", file=f)
    f.close()
    # Metadata files are carried over unchanged from the input LM dir.
    for f in ['names', 'words.txt', 'ngram_order', 'metaparameters']:
        try:
            shutil.copy(args.lm_dir_in + "/" + f,
                        args.lm_dir_out + "/" + f)
        except:
            ExitProgram("error copying {0}/{1} to {2}/{1}".format(
                args.lm_dir_in, f, args.lm_dir_out))
    # A leftover num_splits would misdescribe the (merged) output.
    if os.path.exists(args.lm_dir_out + "/num_splits"):
        os.remove(args.lm_dir_out + "/num_splits")
# find threshold in order to match the target-num-ngrams with final LM
# using PruneSizeModel
# this will return a tuple of (threshold, num_iterations); if we overshot with
# the initial_threshold, it will return (0.0, 0)
def FindThreshold(initial_threshold):
    """Iteratively search for the pruning threshold that hits the target
    n-gram count, driven by PruneSizeModel.

    Each iteration prunes once, asks the model what to do next
    ('overshoot' / 'backtrack' / 'success' / continue), and runs two E-M
    steps before accepting an iteration.  Appends to the global 'steps',
    'logprob_changes', 'effective_logprob_changes' and 'thresholds'
    lists as it goes.
    """
    global initial_num_xgrams, current_num_xgrams, num_unigrams, steps
    global logprob_changes, effective_logprob_changes
    model = PruneSizeModel(num_unigrams, args.target_num_ngrams,
                           args.target_lower_threshold, args.target_upper_threshold)
    # model.SetDebug(True)
    model.SetInitialThreshold(initial_threshold, initial_num_xgrams)
    cur_threshold = initial_threshold
    backtrack_iter = 0
    step = 0
    iter2step = [0]  # This maps an iter-index to the step-index of the last step of that iteration
    while True:
        steps += ['prune*1.0']
        # When backtracking, the input comes from the last step of the
        # iteration we backtracked to, not from the previous step.
        logprob_change = RunStep(step, cur_threshold, in_step=iter2step[backtrack_iter])
        logprob_changes.append(logprob_change)
        effective_logprob_changes.append(logprob_change)
        thresholds.append(cur_threshold)
        step += 1
        (action, arguments) = model.GetNextAction(current_num_xgrams)
        if action == 'overshoot':
            # Even the initial threshold pruned too much; the caller
            # should retry with a smaller initial threshold.
            return (0.0, 0)
        if action == 'backtrack':
            (cur_threshold, backtrack_iter) = arguments
            assert(iter2step[backtrack_iter] > 0)
            # Discard the log-prob changes of the abandoned iterations.
            del effective_logprob_changes[iter2step[backtrack_iter]:]
            # Placeholder entry: this iteration produced no usable step.
            iter2step.append(-1)
            continue
        # EM steps
        steps += 'EM EM'.split()
        while step < len(steps):
            logprob_change = RunStep(step, 0.0)
            logprob_changes.append(logprob_change)
            effective_logprob_changes.append(logprob_change)
            step += 1
        iter2step.append(step)
        if action == 'success':
            return (cur_threshold, model.iter)
        # action == 'continue':
        if model.iter > args.max_iter:
            ExitProgram("Too many iterations, please set a higher --initial-threshold and rerun.")
        cur_threshold = arguments
        backtrack_iter = model.iter
# ---- Main driver: set up globals, run the pruning schedule, finalize. ----
if not os.path.isdir(work_dir):
    try:
        os.makedirs(work_dir)
    except:
        ExitProgram("error creating directory " + work_dir)

# Global quantities read by the step functions above.
num_words = GetNumWords(args.lm_dir_in)
ngram_order = GetNgramOrder(args.lm_dir_in)
(num_unigrams, initial_num_xgrams) = GetNumGrams(args.lm_dir_in)
current_num_xgrams = None
initial_logprob_per_word = None
final_logprob_per_word = None
waiting_thread = None
logprob_changes = []
effective_logprob_changes = []
thresholds = []

CreateInitialWorkDir()

if args.check_exact_divergence == 'true':
    if args.target_num_ngrams <= 0 and steps[-1] != 'EM':
        LogMessage("--check-exact-divergence=true won't give you the "
                   "exact divergence because the last step is not 'EM'.")
    # Compute the baseline log-prob in the background; we join before
    # reporting the divergence at the end.
    waiting_thread = threading.Thread(target=GetInitialLogprob)
    waiting_thread.start()

if args.target_num_ngrams > 0:
    # Size-targeted mode: search for the threshold automatically.
    # For PruneSizeModel.MatchTargetNumNgrams() and PruneSizeModel.NumXgrams2NumNgrams()
    model = PruneSizeModel(num_unigrams, args.target_num_ngrams,
                           args.target_lower_threshold, args.target_upper_threshold)
    if model.MatchTargetNumNgrams(initial_num_xgrams):
        LogMessage("the input LM is already match the size with target-num-ngrams, do not need any pruning")
        sys.exit(0)
    if args.target_num_ngrams > model.NumXgrams2NumNgrams(initial_num_xgrams):
        ExitProgram("the num-ngrams({0}) of input LM is less than the target-num-ngrams({1}), "
                    "can not do any pruning.".format(
                        model.NumXgrams2NumNgrams(initial_num_xgrams), args.target_num_ngrams))
    threshold = 0.0
    initial_threshold = args.initial_threshold
    # FindThreshold returns threshold 0.0 when the initial threshold
    # overshot; retry with a smaller one until it succeeds.
    while threshold == 0.0:
        (threshold, iter) = FindThreshold(initial_threshold)
        if threshold > 0.0:
            break
        # Reset all per-attempt state before retrying.
        logprob_changes = []
        effective_logprob_changes = []
        thresholds = []
        steps = []
        initial_threshold /= 4.0
        LogMessage("Reduce --initial-threshold to {0}, and retry.".format(
            initial_threshold))
    LogMessage("Find the threshold {0} in {1} iteration(s)".format(threshold, iter))
    LogMessage("thresholds per iter were " + str(thresholds))
else:
    # Fixed-threshold mode: just run the user-specified --steps schedule.
    for step in range(len(steps)):
        logprob_change = RunStep(step, args.final_threshold)
        logprob_changes.append(logprob_change)
        effective_logprob_changes.append(logprob_change)

FinalizeOutput(work_dir + "/step" + str(len(steps)))

if waiting_thread is not None:
    waiting_thread.join()

LogMessage("log-prob changes per step were " + str(logprob_changes))
initial_num_ngrams = initial_num_xgrams + num_unigrams
current_num_ngrams = current_num_xgrams + num_unigrams
LogMessage("reduced number of n-grams from {0} to {1}, "
           "i.e. by {2}%".format(initial_num_ngrams, current_num_ngrams,
                                 100.0 * (initial_num_ngrams - current_num_ngrams) / initial_num_ngrams))
# The following prints the K-L divergence; it breaks out the parts by sign so
# you can see the effect of the E-M separately (it's usually quite small).
LogMessage("approximate K-L divergence was {0} + {1} = {2}".format(
    -sum([max(0.0, x) for x in effective_logprob_changes]),
    -sum([min(0.0, x) for x in effective_logprob_changes]),
    -sum(effective_logprob_changes)))
if initial_logprob_per_word is not None and steps[-1] == 'EM':
    LogMessage("exact K-L divergence was {0}".format(
        initial_logprob_per_word - final_logprob_per_word))
# clean up the work directory.
if args.cleanup == 'true':
    shutil.rmtree(work_dir)
if os.system("validate_lm_dir.py " + args.lm_dir_out) != 0:
    ExitProgram("failed to validate output LM-dir")
|
{
"content_hash": "7f6889f3dcba1418a318e6abb6890d56",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 108,
"avg_line_length": 43.01633986928105,
"alnum_prop": 0.5998632530578135,
"repo_name": "wantee/pocolm",
"id": "0898772930ac234670b89d2a820bbc924c3fa536",
"size": "26421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/prune_lm_dir.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "356912"
},
{
"name": "Makefile",
"bytes": "1294"
},
{
"name": "Perl",
"bytes": "4912"
},
{
"name": "Python",
"bytes": "274790"
},
{
"name": "Shell",
"bytes": "83576"
}
],
"symlink_target": ""
}
|
from lophi.sensors.network import NetworkSensor
class NetworkSensorVirtual(NetworkSensor):
    """
    Virtual network sensor for SUTs (systems under test).

    NOTE(review): no behavior is defined here yet; the class currently
    inherits everything from NetworkSensor unchanged.
    """
|
{
"content_hash": "83d7093e28f06bd15490f88decc4f81c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 47,
"avg_line_length": 24.5,
"alnum_prop": 0.7278911564625851,
"repo_name": "mit-ll/LO-PHI",
"id": "104f2ff6ebac5dc7604d3676950b411a14eb1e9c",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-lophi/lophi/sensors/network/virtual.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "58723"
},
{
"name": "Elixir",
"bytes": "18208"
},
{
"name": "Emacs Lisp",
"bytes": "1368"
},
{
"name": "Groff",
"bytes": "1900"
},
{
"name": "M4",
"bytes": "2284"
},
{
"name": "Makefile",
"bytes": "64810"
},
{
"name": "Protocol Buffer",
"bytes": "1803"
},
{
"name": "Python",
"bytes": "1220515"
},
{
"name": "Shell",
"bytes": "23976"
}
],
"symlink_target": ""
}
|
'''
This script will look through a WireShark csv file and see if it can find
unaligned reads. "Unaligned" is any read operation that doesn't begin and end
on a 16-byte boundary. You might want to change that value for your tests.
Here are some hints and details:
* You can create a .csv file with WireShark. With WireShark 1.0.15 you do the
  following:
  1. Capture your data.
  2. Hit the "Stop" button.
  3. Select "File" -> "Export" -> "as CSV ..."
  4. Specify the name of the file and click "OK".
* This script is currently hardcoded to read a file called "trace.csv". That
  can be changed if necessary.
* Using the "csv" module, it reads the file one line at a time.
* Each line is converted to a list object. The object consists of:
  [0] - The record number.
  [1] - The timestamp.
  [2] - The src IP Address.
  [3] - The dst IP Address.
  [4] - The protocol. For SMB2 it will say "SMB2".
  [5] - The length of the frame. I'm not sure if that's the whole frame or just
        some portion. It's probably the whole frame.
  [6] - The details of the record that was read. For an SMB2 Read it will say:
        "Read Request Len:xxxxx Off:yyyyy File: joe.txt"
* We are after the xxxxx and yyyyy portions. We extract them and check to make
  sure they're multiples of 16 (using "val % 16").
'''
import csv

# Open trace.csv as an input file.
with open('trace.csv', 'r') as csvInFile:
    # Get a csv object. It knows how to read csv file.
    reader = csv.reader(csvInFile)
    # Loop once for each record in the csv file.
    for line in reader:
        # Index 6 contains the details of the record.
        field = line[6]
        # Check to see if it's a read request. It's possible this won't
        # be a good test. It worked for my purposes, but you might need
        # to change it for your purposes.
        readRequestIndex = field.find("Read Request")
        if readRequestIndex != -1:
            # Chop the data before the "Len:" field.
            readLenBeg = field.find("Len:") + 4
            lenStr = field[readLenBeg:]
            # Chop the data after the "Len:" value.
            readLenEnd = lenStr.find(" ")
            lenStr = lenStr[0:readLenEnd]
            # Get the read length.
            # NOTE(review): 'len' shadows the builtin len() for the rest
            # of the loop body; harmless here but worth renaming.
            len = int(lenStr)
            # Chop the data before the "Off:" field.
            readOffBeg = field.find("Off:") + 4
            offStr = field[readOffBeg:]
            # Chop the data after the "Off:" value.
            readOffEnd = offStr.find(" ")
            offStr = offStr[0:readOffEnd]
            # Get the read offset.
            off = int(offStr)
            # Check to see if the offset or length is unaligned on
            # a crypto boundary (16 bytes).
            offBlockMod = off % 16
            lenBlockMod = len % 16
            print "len = " + str(len) + ". off = " + str(off) + "."
            # Stop at the first unaligned read found.
            if offBlockMod > 0:
                print "offBlockMod = " + str(offBlockMod) + "."
                exit(1)
            if lenBlockMod > 0:
                print "lenBlockMod = " + str(lenBlockMod) + "."
                exit(1)
|
{
"content_hash": "7472b64512ba9cab6452e540a73adf52",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 34.4320987654321,
"alnum_prop": 0.6618859806382216,
"repo_name": "joeriggs/experiments",
"id": "87f0ce22878aafae3e00d83408599f02b8089794",
"size": "2789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/wiresharkScanner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "205628"
},
{
"name": "C++",
"bytes": "2869"
},
{
"name": "Makefile",
"bytes": "4726"
},
{
"name": "Python",
"bytes": "2789"
},
{
"name": "Shell",
"bytes": "28521"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the PostCategory through-model and the Post.categories M2M.

    Creates the PostCategory table (linking Post and Category with a
    'primary' flag) and adds a many-to-many 'categories' field on Post
    routed through it.
    """

    dependencies = [
        ('bulletin', '0002_auto_20151216_2200'),
    ]

    operations = [
        migrations.CreateModel(
            name='PostCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID',
                                        serialize=False,
                                        auto_created=True,
                                        primary_key=True)),
                ('primary', models.BooleanField(default=False)),
                ('category', models.ForeignKey(to='bulletin.Category')),
                ('post', models.ForeignKey(to='bulletin.Post')),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='categories',
            field=models.ManyToManyField(to='bulletin.Category',
                                         through='bulletin.PostCategory',
                                         blank=True),
        ),
    ]
|
{
"content_hash": "c5e9bb1ed02d814cb79785906cdde5ae",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 73,
"avg_line_length": 33.46875,
"alnum_prop": 0.46872082166199813,
"repo_name": "AASHE/django-bulletin",
"id": "47052c999266c37707728bc126befe05dbadce44",
"size": "1095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bulletin/migrations/0003_add_field_post_categories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31770"
},
{
"name": "Clojure",
"bytes": "11991"
},
{
"name": "HTML",
"bytes": "123427"
},
{
"name": "JavaScript",
"bytes": "1319347"
},
{
"name": "Procfile",
"bytes": "72"
},
{
"name": "Python",
"bytes": "271365"
}
],
"symlink_target": ""
}
|
from collections import deque
from itertools import count
filename = 'corporate_gifting.txt'
class Node(object):
    """Tree node holding a link to its parent and a list of children.

    NOTE: this was originally declared with 'def' instead of 'class',
    which made Node a function whose nested __init__ was unreachable.
    The rest of this script builds the tree as a dict of sets, so the
    bug was latent, but the declaration is now a proper class.
    """

    def __init__(self, parent=None):
        self.parent = parent   # parent Node, or None for the root
        self.children = []     # child Nodes, appended by the caller
def solve(tree):
    """Return the minimum total gift cost for the employee tree.

    'tree' maps employee id -> set of direct-report ids; employee 1 is
    the CEO.  Each manager's gift value is the smallest positive integer
    not used by any direct report (a mex), computed bottom-up without
    recursion.

    Cases:
    - CEO alone
    - max flat hierarchy
    - max vertical hierarchy
    """
    gifts = {}
    queue = deque()
    stack = deque()  # simulates recursion
    queue.append(1)  # BFS going down
    # Pass 1: leaves get gift 1; internal nodes are stacked so they are
    # processed after all of their descendants.
    while queue:
        employee = queue.pop()
        num_children = len(tree[employee])
        if num_children == 0:
            gifts[employee] = 1
        else:
            stack.append(employee)
            for child in tree[employee]:
                queue.append(child)
    # print(gifts)
    # Pass 2: pop managers in reverse discovery order and assign each
    # the mex of its children's gift values.
    while stack:
        # print(stack)
        manager = stack.pop()
        # print(manager)
        subtree = 0
        subtree_values = set()
        values_by_subordinate = {}
        for child in tree[manager]:
            values_by_subordinate[child] = gifts[child]
            subtree_values.add(gifts[child])
            subtree += gifts[child]
        # print('subtree ', subtree, ' - ', subtree_values)
        # print('subs', values_by_subordinate)
        # if there are children with $1 AND $2, try to optimize the more expensive
        if 1 in subtree_values and 2 in subtree_values:
            for subordinate, gift in values_by_subordinate.items():
                if gift == 2:
                    if len(tree[subordinate]) == 1:
                        # exchange gifts
                        # NOTE(review): both assignments look like no-ops —
                        # 'subordinate' already holds 2 and its single child
                        # already holds 1 (that is why the mex was 2) — and
                        # pop() permanently removes the child from the tree.
                        # Presumably a swap (subordinate=1, son=2) was
                        # intended; verify against the problem statement.
                        gifts[subordinate] = 2
                        son = tree[subordinate].pop()
                        gifts[son] = 1
            # run subtree again
            subtree = 0
            subtree_values = set()
            for child in tree[manager]:
                subtree_values.add(gifts[child])
                subtree += gifts[child]
            # print('subtree optimized', subtree, ' - ', subtree_values)
        # The manager's gift is the smallest positive value missing from
        # the children's gift values (mex).
        for i in count(1):
            if i not in subtree_values:
                gifts[manager] = i
                break
    # print(gifts)
    v = 0
    for gift in gifts.values():
        v += gift
    return v
if __name__ == "__main__":
    # Input format: first line is the number of test cases; each case is
    # a count N followed by a line of N manager ids (0 = no manager).
    with open(filename) as input_file:
        t = int(input_file.readline().strip())
        for i in range(1, t + 1):
            N = int(input_file.readline().strip())
            hierarchy = map(int, input_file.readline().strip().split())
            # build adjacency list
            tree = {}
            for j in range(1, N + 1):
                tree[j] = set()
            # Employee j+1 reports to 'manager' (employees are 1-based).
            for j, manager in enumerate(hierarchy):
                if manager != 0:
                    tree[manager].add(j + 1)
            answer = solve(tree)
            print("Case #%s: %s" % (i, answer))
|
{
"content_hash": "fbaa9d46c411ed151706174ecf15362e",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 82,
"avg_line_length": 26.471698113207548,
"alnum_prop": 0.5021382751247327,
"repo_name": "fcaneto/py_programming_exercises",
"id": "2eebdb1b7ab93332e93b6367a9415ae677d9fa72",
"size": "2806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fb_hackercup_2015/round_1/corporate/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47461"
}
],
"symlink_target": ""
}
|
import os, sys
# Make the parent directory importable so the 'run4v' package can be
# found without being installed.
sys.path.append(os.path.abspath(os.path.join(os.curdir,"..")))
# Presumably imported for its side effects / to re-export run4v to the
# examples that import this module — verify against the callers.
import run4v
|
{
"content_hash": "5aca8f6cce32fe89f46bc3ea0167e170",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 18.8,
"alnum_prop": 0.7021276595744681,
"repo_name": "alejandrogallo/run4v",
"id": "8d1d1fbcfe685971117f770c549b97870f29b56d",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/common.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "19066"
}
],
"symlink_target": ""
}
|
from neutron.tests import base
class TestDragonflowL3Agent(base.BaseTestCase):
    """Unit-test scaffold for the Dragonflow L3 agent.

    Currently only invokes the Neutron base-class setUp; test methods
    have not been added yet.
    """

    def setUp(self):
        super(TestDragonflowL3Agent, self).setUp()
|
{
"content_hash": "bbf9f584fb397e0ede14f1877c0903e3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 50,
"avg_line_length": 22,
"alnum_prop": 0.7467532467532467,
"repo_name": "no2key/dragonflow",
"id": "085897ff7d60e607e534c3173a93e1297aef5dce",
"size": "797",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dragonflow/tests/unit/test_l3_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "180108"
},
{
"name": "Shell",
"bytes": "2223"
}
],
"symlink_target": ""
}
|
"""
Configuration items for the shop.
Also contains shopping cart and related classes.
"""
import datetime
import logging
import operator
import notification
import signals
try:
from decimal import Decimal
except:
from django.utils._decimal import Decimal
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import force_unicode
from django.core import urlresolvers
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
from satchmo import caching
from satchmo import tax
from satchmo.configuration import ConfigurationSettings, config_value, config_choice_values, config_get_group
from satchmo.contact.models import Contact
from satchmo.contact.signals import satchmo_contact_location_changed
from satchmo.l10n.models import Country
from satchmo.l10n.utils import moneyfmt
from satchmo.payment.fields import PaymentChoiceCharField
from satchmo.product import signals as product_signals
from satchmo.product.models import Product, DownloadableProduct
from satchmo.shipping.fields import ShippingChoiceCharField
from satchmo.tax.utils import get_tax_processor
from django.contrib.sites.models import Site
log = logging.getLogger('satchmo.shop.models')
class NullConfig(object):
    """Placeholder shop configuration used when no real Config row exists."""

    def __init__(self):
        # Dummy values for every text field a real Config would expose.
        placeholder = _("Test Store")
        self.store_name = placeholder
        self.store_description = placeholder
        for attr in ('store_email', 'street1', 'street2', 'city',
                     'state', 'postal_code', 'phone'):
            setattr(self, attr, "")
        self.site = None
        self.country = None
        self.no_stock_checkout = False
        self.in_country_only = True
        self.sales_country = None

    def _options(self):
        return ConfigurationSettings()

    options = property(fget=_options)

    def __str__(self):
        return "Test Store - no configured store exists!"
class ConfigManager(models.Manager):
    def get_current(self, site=None):
        """Convenience method to get the current shop config.

        Looks the Config up in the cache first; on a miss it queries the
        database and caches the result.  Falls back to a NullConfig (and
        logs a warning) when no Config row exists for the site.
        """
        if not site:
            site = Site.objects.get_current()
        # Cache keys use the site id, not the Site object.
        site = site.id
        try:
            shop_config = caching.cache_get("Config", site)
        except caching.NotCachedError, nce:
            try:
                shop_config = self.get(site__id__exact=site)
                # nce.key is the cache key computed by cache_get above.
                caching.cache_set(nce.key, value=shop_config)
            except Config.DoesNotExist:
                log.warning("No Shop Config found, using test shop config for site=%s.", site)
                shop_config = NullConfig()
        return shop_config
class Config(models.Model):
    """
    Used to store specific information about a store.  Also used to
    configure various store behaviors.  One Config exists per Site.
    """
    # One config per site; the site is also the primary key.
    site = models.OneToOneField(Site, verbose_name=_("Site"), primary_key=True)
    store_name = models.CharField(_("Store Name"),max_length=100, unique=True)
    store_description = models.TextField(_("Description"), blank=True, null=True)
    store_email = models.EmailField(_("Email"), blank=True, null=True, max_length=75)
    # Physical address of the store.
    street1=models.CharField(_("Street"),max_length=50, blank=True, null=True)
    street2=models.CharField(_("Street"), max_length=50, blank=True, null=True)
    city=models.CharField(_("City"), max_length=50, blank=True, null=True)
    state=models.CharField(_("State"), max_length=30, blank=True, null=True)
    postal_code=models.CharField(_("Zip Code"), blank=True, null=True, max_length=9)
    country=models.ForeignKey(Country, blank=True, null=False, verbose_name=_('Country'))
    phone = models.CharField(_("Phone Number"), blank=True, null=True, max_length=12)
    # Store-behavior flags.
    no_stock_checkout = models.BooleanField(_("Purchase item not in stock?"), default=True)
    in_country_only = models.BooleanField(_("Only sell to in-country customers?"), default=True)
    sales_country = models.ForeignKey(Country, blank=True, null=True,
                                      related_name='sales_country',
                                      verbose_name=_("Default country for customers"))
    shipping_countries = models.ManyToManyField(Country, blank=True, verbose_name=_("Shipping Countries"), related_name="shop_configs")

    objects = ConfigManager()

    def _options(self):
        return ConfigurationSettings()

    options = property(fget=_options)

    def areas(self):
        """Get country areas (states/counties). Used in forms."""
        if self.in_country_only:
            return self.sales_country.adminarea_set.filter(active=True)
        else:
            return None

    def countries(self):
        """Get country selections. Used in forms."""
        if self.in_country_only:
            # Selling domestically only: the single sales country.
            return Country.objects.filter(pk=self.sales_country.pk)
        else:
            return self.shipping_countries.filter(active=True)

    def _base_url(self, secure=False):
        # Build 'http(s)://<domain>' for this config's site.
        prefix = "http"
        if secure:
            prefix += "s"
        return prefix + "://" + self.site.domain

    base_url = property(fget=_base_url)

    def save(self, force_insert=False, force_update=False):
        """Save the config, keeping the cache and sales_country consistent."""
        # Invalidate before saving so readers never see a stale entry.
        caching.cache_delete("Config", self.site.id)
        # ensure the default country is in shipping countries
        mycountry = self.country

        if mycountry:
            if not self.sales_country:
                log.debug("%s: No sales_country set, adding country of store, '%s'", self, mycountry)
                self.sales_country = mycountry

# This code doesn't work when creating a new site.  At the time of creation, all of the necessary relationships
# aren't setup.  I modified the load_store code so that it would create this relationship manually when running
# with sample data.  This is a bit of a django limitation so I'm leaving this in here for now. - CBM
#            salescountry = self.sales_country
#            try:
#                need = self.shipping_countries.get(pk=salescountry.pk)
#            except Country.DoesNotExist:
#                log.debug("%s: Adding default country '%s' to shipping countries", self, salescountry.iso2_code)
#                self.shipping_countries.add(salescountry)

        else:
            log.warn("%s: has no country set", self)

        super(Config, self).save(force_insert=force_insert, force_update=force_update)
        # Re-prime the cache with the saved instance.
        caching.cache_set("Config", self.site.id, value=self)

    def __unicode__(self):
        return self.store_name

    class Meta:
        verbose_name = _("Store Configuration")
        verbose_name_plural = _("Store Configurations")
class NullCart(object):
    """Stand-in for a real cart when one does not exist yet.

    Presents the same read-only interface as Cart (empty, zero total)
    so callers never need to test for None.
    """

    # Attributes a real Cart would expose, with empty/zero values.
    desc = None
    date_time_created = None
    customer = None
    total = Decimal("0")
    numItems = 0

    def __str__(self):
        return "NullCart (empty)"

    def __iter__(self):
        return iter([])

    def __len__(self):
        return 0

    # Mutating operations are accepted and silently ignored.
    def add_item(self, *args, **kwargs):
        pass

    def remove_item(self, *args, **kwargs):
        pass

    def empty(self):
        pass
class OrderCart(NullCart):
    """Read-only cart facade over an existing Order.

    Used to fake a cart when an order is being reloaded; all counts and
    totals are delegated to the wrapped order's item set and balance.
    """

    # An order being reloaded is never treated as shippable here.
    is_shippable = False

    def __init__(self, order):
        self._order = order

    def _numItems(self):
        return self._order.orderitem_set.count()

    numItems = property(_numItems)

    def _cartitem_set(self):
        return self._order.orderitem_set

    cartitem_set = property(_cartitem_set)

    def _total(self):
        return self._order.balance

    total = property(_total)

    def __len__(self):
        return self.numItems

    def __str__(self):
        return "OrderCart (%i) = %i" % (self._order.id, len(self))
class CartManager(models.Manager):
    def from_request(self, request, create=False, return_nullcart=True):
        """Get the current cart from the request.

        Resolution order: the session's 'cart' entry (either an order
        being reloaded or a Cart id), then any existing cart belonging
        to the request's contact.  If nothing is found, either creates a
        new Cart (create=True), returns a NullCart (return_nullcart=True),
        or raises Cart.DoesNotExist.
        """
        cart = None
        try:
            contact = Contact.objects.from_request(request, create=False)
        except Contact.DoesNotExist:
            contact = None
        if 'cart' in request.session:
            cartid = request.session['cart']
            # The sentinel value "order" means an order is being reloaded
            # and should be presented through an OrderCart facade.
            if cartid == "order":
                log.debug("Getting Order Cart from request")
                try:
                    order = Order.objects.from_request(request)
                    cart = OrderCart(order)
                except Order.DoesNotExist:
                    pass
            else:
                try:
                    cart = Cart.objects.get(id=cartid)
                except Cart.DoesNotExist:
                    log.debug('Removing invalid cart from session')
                    del request.session['cart']
        # Fall back to a cart previously saved for this contact (but not
        # when we are presenting an order through OrderCart).
        if isinstance(cart, NullCart) and not isinstance(cart, OrderCart) and contact is not None:
            carts = Cart.objects.filter(customer=contact)
            if carts.count() > 0:
                cart = carts[0]
                request.session['cart'] = cart.id
        if not cart:
            if create:
                site = Site.objects.get_current()
                if contact is None:
                    cart = Cart(site=site)
                else:
                    cart = Cart(site=site, customer=contact)
                cart.save()
                request.session['cart'] = cart.id
            elif return_nullcart:
                cart = NullCart()
            else:
                raise Cart.DoesNotExist()
        #log.debug("Cart: %s", cart)
        return cart
class Cart(models.Model):
"""
Store items currently in a cart
The desc isn't used but it is needed to make the admin interface work appropriately
Could be used for debugging
"""
site = models.ForeignKey(Site, verbose_name=_('Site'))
desc = models.CharField(_("Description"), blank=True, null=True, max_length=150)
date_time_created = models.DateTimeField(_("Creation Date"))
customer = models.ForeignKey(Contact, blank=True, null=True, verbose_name=_('Customer'))
objects = CartManager()
def _get_count(self):
itemCount = 0
for item in self.cartitem_set.all():
itemCount += item.quantity
return (itemCount)
numItems = property(_get_count)
def _get_total(self):
total = Decimal("0")
for item in self.cartitem_set.all():
total += item.line_total
return(total)
total = property(_get_total)
def __iter__(self):
return iter(self.cartitem_set.all())
def __len__(self):
return self.cartitem_set.count()
def __unicode__(self):
return u"Shopping Cart (%s)" % self.date_time_created
def add_item(self, chosen_item, number_added, details={}):
alreadyInCart = False
# Custom Products will not be added, they will each get their own line item
if 'CustomProduct' in chosen_item.get_subtypes():
item_to_modify = CartItem(cart=self, product=chosen_item, quantity=0)
else:
item_to_modify = CartItem(cart=self, product=chosen_item, quantity=0)
for similarItem in self.cartitem_set.filter(product__id = chosen_item.id):
looksTheSame = len(details) == similarItem.details.count()
if looksTheSame:
for detail in details:
try:
similarItem.details.get(
name=detail['name'],
value=detail['value'],
price_change=detail['price_change']
)
except CartItemDetails.DoesNotExist:
looksTheSame = False
if looksTheSame:
item_to_modify = similarItem
alreadyInCart = True
break
if not alreadyInCart:
signals.satchmo_cart_add_verify.send(self, cart=self, cartitem=item_to_modify, added_quantity=number_added, details=details)
self.cartitem_set.add(item_to_modify)
item_to_modify.quantity += number_added
item_to_modify.save()
if not alreadyInCart:
for data in details:
item_to_modify.add_detail(data)
return item_to_modify
def remove_item(self, chosen_item_id, number_removed):
item_to_modify = self.cartitem_set.get(id = chosen_item_id)
item_to_modify.quantity -= number_removed
if item_to_modify.quantity <= 0:
item_to_modify.delete()
self.save()
def empty(self):
for item in self.cartitem_set.all():
item.delete()
self.save()
def save(self, force_insert=False, force_update=False):
"""Ensure we have a date_time_created before saving the first time."""
if not self.pk:
self.date_time_created = datetime.datetime.now()
try:
site = self.site
except Site.DoesNotExist:
self.site = Site.objects.get_current()
super(Cart, self).save(force_insert=force_insert, force_update=force_update)
def _get_shippable(self):
"""Return whether the cart contains shippable items."""
for cartitem in self.cartitem_set.all():
if cartitem.is_shippable:
return True
return False
is_shippable = property(_get_shippable)
def get_shipment_list(self):
"""Return a list of shippable products, where each item is split into
multiple elements, one for each quantity."""
items = []
for cartitem in self.cartitem_set.all():
if cartitem.is_shippable:
p = cartitem.product
for single in range(0,cartitem.quantity):
items.append(p)
return items
    class Meta:
        # Admin/display names for the Cart model.
        verbose_name = _("Shopping Cart")
        verbose_name_plural = _("Shopping Carts")
class NullCartItem(object):
    """Placeholder line item with zero quantity and total.

    Mimics the small slice of the CartItem interface (id, quantity,
    line_total) needed where a real CartItem is unavailable.
    """

    def __init__(self, itemid):
        self.id = itemid
        self.quantity = self.line_total = 0
class CartItem(models.Model):
    """
    An individual item in the cart
    """
    cart = models.ForeignKey(Cart, verbose_name=_('Cart'))
    product = models.ForeignKey(Product, verbose_name=_('Product'))
    quantity = models.IntegerField(_("Quantity"), )
    notes = models.CharField(_("Notes"), blank=True, null=True, max_length=150)
    def _get_line_unitprice(self):
        """Return the per-unit price: quantity-discounted price plus detail deltas.

        Temporarily stashes ``qty_price`` and ``detail_price`` on ``self`` so
        ``satchmo_cartitem_price_query`` listeners can adjust them in place
        before the two are summed.
        """
        # Get the qty discount price as the unit price for the line.
        self.qty_price = self.get_qty_price(self.quantity)
        self.detail_price = self.get_detail_price()
        #send signal to possibly adjust the unitprice
        signals.satchmo_cartitem_price_query.send(self, cartitem=self)
        price = self.qty_price + self.detail_price
        #clean up temp vars
        del self.qty_price
        del self.detail_price
        return price
    unit_price = property(_get_line_unitprice)
    def get_detail_price(self):
        """Get the delta price based on detail modifications"""
        delta = Decimal("0")
        if self.has_details:
            for detail in self.details.all():
                # Only details carrying both a price change and a value count.
                if detail.price_change and detail.value:
                    delta += detail.price_change
        return delta
    def get_qty_price(self, qty):
        """Get the price for each unit before any detail modifications"""
        return self.product.get_qty_price(qty)
    def _get_line_total(self):
        """Extended price for the line (unit price times quantity)."""
        return self.unit_price * self.quantity
    line_total = property(_get_line_total)
    def _get_description(self):
        """Translated product name, for display."""
        return self.product.translated_name()
    description = property(_get_description)
    def _is_shippable(self):
        """Whether this line's product requires shipping."""
        return self.product.is_shippable
    is_shippable = property(fget=_is_shippable)
    def add_detail(self, data):
        """Create and save a CartItemDetails row from a detail dict.

        ``data`` must supply 'name', 'value', 'sort_order' and 'price_change'.
        """
        detl = CartItemDetails(cartitem=self, name=data['name'], value=data['value'], sort_order=data['sort_order'], price_change=data['price_change'])
        detl.save()
    def _has_details(self):
        """
        Determine if this specific item has more detail
        """
        return (self.details.count() > 0)
    has_details = property(_has_details)
    def __unicode__(self):
        # e.g. u"2 - Widget US Dollar19.90" (currency label comes from config).
        currency = config_value('SHOP', 'CURRENCY')
        currency = currency.replace("_", " ")
        return u'%s - %s %s%s' % (self.quantity, self.product.name,
            force_unicode(currency), self.line_total)
    class Meta:
        verbose_name = _("Cart Item")
        verbose_name_plural = _("Cart Items")
        ordering = ('id',)
class CartItemDetails(models.Model):
    """
    An arbitrary detail about a cart item.
    """
    cartitem = models.ForeignKey(CartItem, related_name='details', )
    value = models.TextField(_('detail'))
    name = models.CharField(_('name'), max_length=100)
    # Added to the line's unit price by CartItem.get_detail_price().
    price_change = models.DecimalField(_("Item Detail Price Change"), max_digits=6, decimal_places=2, blank=True, null=True)
    sort_order = models.IntegerField(_("Sort Order"),
        help_text=_("The display order for this group."))
    class Meta:
        ordering = ('sort_order',)
        verbose_name = _("Cart Item Detail")
        verbose_name_plural = _("Cart Item Details")
# Sales channels an order may originate from.
ORDER_CHOICES = (
    ('Online', _('Online')),
    ('In Person', _('In Person')),
    ('Show', _('Show')),
)
# Lifecycle states an order passes through; mirrored onto Order.status
# by OrderStatus.save().
ORDER_STATUS = (
    ('Temp', _('Temp')),
    ('Pending', _('Pending')),
    ('In Process', _('In Process')),
    ('Billed', _('Billed')),
    ('Shipped', _('Shipped')),
)
class OrderManager(models.Manager):
    def from_request(self, request):
        """Get the order referenced by the session.

        Raises Order.DoesNotExist when the session has no valid orderID;
        a stale session reference is cleared as a side effect.
        """
        order = None
        if 'orderID' in request.session:
            try:
                order = Order.objects.get(id=request.session['orderID'])
                # TODO: Validate against logged-in user.
            except Order.DoesNotExist:
                pass
            if not order:
                # Stale reference -- clean it out of the session.
                del request.session['orderID']
        if not order:
            raise Order.DoesNotExist()
        return order
    def remove_partial_order(self, request):
        """Delete cart from request if it exists and is incomplete (has no status)"""
        try:
            order = Order.objects.from_request(request)
            if not order.status:
                del request.session['orderID']
                log.info("Deleting incomplete order #%i from database", order.id)
                order.delete()
                return True
        except Order.DoesNotExist:
            pass
        return False
class Order(models.Model):
    """
    Orders contain a copy of all the information at the time the order was
    placed.
    """
    site = models.ForeignKey(Site, verbose_name=_('Site'))
    contact = models.ForeignKey(Contact, verbose_name=_('Contact'))
    # Shipping address snapshot, copied from the contact at order time.
    ship_addressee = models.CharField(_("Addressee"), max_length=61, blank=True)
    ship_street1 = models.CharField(_("Street"), max_length=80, blank=True)
    ship_street2 = models.CharField(_("Street"), max_length=80, blank=True)
    ship_city = models.CharField(_("City"), max_length=50, blank=True)
    ship_state = models.CharField(_("State"), max_length=50, blank=True)
    ship_postal_code = models.CharField(_("Zip Code"), max_length=30, blank=True)
    ship_country = models.CharField(_("Country"), max_length=2, blank=True)
    # Billing address snapshot.
    bill_addressee = models.CharField(_("Addressee"), max_length=61, blank=True)
    bill_street1 = models.CharField(_("Street"), max_length=80, blank=True)
    bill_street2 = models.CharField(_("Street"), max_length=80, blank=True)
    bill_city = models.CharField(_("City"), max_length=50, blank=True)
    bill_state = models.CharField(_("State"), max_length=50, blank=True)
    bill_postal_code = models.CharField(_("Zip Code"), max_length=30, blank=True)
    bill_country = models.CharField(_("Country"), max_length=2, blank=True)
    notes = models.TextField(_("Notes"), blank=True, null=True)
    sub_total = models.DecimalField(_("Subtotal"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    total = models.DecimalField(_("Total"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    discount_code = models.CharField(_("Discount Code"), max_length=20, blank=True, null=True,
        help_text=_("Coupon Code"))
    discount = models.DecimalField(_("Discount amount"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    method = models.CharField(_("Order method"),
        choices=ORDER_CHOICES, max_length=50, blank=True)
    shipping_description = models.CharField(_("Shipping Description"),
        max_length=50, blank=True, null=True)
    shipping_method = models.CharField(_("Shipping Method"),
        max_length=50, blank=True, null=True)
    shipping_model = ShippingChoiceCharField(_("Shipping Models"),
        max_length=30, blank=True, null=True)
    shipping_cost = models.DecimalField(_("Shipping Cost"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    shipping_discount = models.DecimalField(_("Shipping Discount"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    tax = models.DecimalField(_("Tax"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    time_stamp = models.DateTimeField(_("Timestamp"), blank=True, null=True)
    status = models.CharField(_("Status"), max_length=20, choices=ORDER_STATUS,
        blank=True, help_text=_("This is set automatically."))
    objects = OrderManager()
    def __unicode__(self):
        return "Order #%s: %s" % (self.id, self.contact.full_name)
    def add_status(self, status=None, notes=None):
        """Append an OrderStatus entry.

        Defaults to the most recent existing status, or 'Pending' when the
        order has none yet.
        """
        orderstatus = OrderStatus()
        if not status:
            if self.orderstatus_set.count() > 0:
                curr_status = self.orderstatus_set.all().order_by('-time_stamp')[0]
                status = curr_status.status
            else:
                status = 'Pending'
        orderstatus.status = status
        orderstatus.notes = notes
        orderstatus.time_stamp = datetime.datetime.now()
        orderstatus.order = self
        orderstatus.save()
    def add_variable(self, key, value):
        """Add an OrderVariable, used for misc stuff that is just too small to get its own field"""
        try:
            v = self.variables.get(key__exact=key)
            v.value = value
        except OrderVariable.DoesNotExist:
            v = OrderVariable(order=self, key=key, value=value)
        v.save()
    def get_variable(self, key, default=None):
        """Return the OrderVariable object for ``key``, or ``default``."""
        qry = self.variables.filter(key__exact=key)
        ct = qry.count()
        if ct == 0:
            return default
        else:
            return qry[0]
    def copy_addresses(self):
        """
        Copy the addresses so we know what the information was at time of order.
        """
        shipaddress = self.contact.shipping_address
        billaddress = self.contact.billing_address
        self.ship_addressee = shipaddress.addressee
        self.ship_street1 = shipaddress.street1
        self.ship_street2 = shipaddress.street2
        self.ship_city = shipaddress.city
        self.ship_state = shipaddress.state
        self.ship_postal_code = shipaddress.postal_code
        self.ship_country = shipaddress.country.iso2_code
        self.bill_addressee = billaddress.addressee
        self.bill_street1 = billaddress.street1
        self.bill_street2 = billaddress.street2
        self.bill_city = billaddress.city
        self.bill_state = billaddress.state
        self.bill_postal_code = billaddress.postal_code
        self.bill_country = billaddress.country.iso2_code
    def remove_all_items(self):
        """Delete all items belonging to this order."""
        for item in self.orderitem_set.all():
            item.delete()
        self.save()
    def _balance(self):
        """Amount still owed: total minus payments received."""
        if self.total is None:
            self.force_recalculate_total(save=True)
        return self.total-self.balance_paid
    balance = property(fget=_balance)
    def balance_forward(self):
        # Rebound as a read-only property just below.
        return moneyfmt(self.balance)
    balance_forward = property(fget=balance_forward)
    def _balance_paid(self):
        """Sum of all payment amounts as a Decimal (zero when no payments)."""
        payments = [p.amount for p in self.payments.all()]
        if payments:
            return reduce(operator.add, payments)
        else:
            return Decimal("0.0000000000")
    balance_paid = property(_balance_paid)
    def _credit_card(self):
        """Return the credit card from the most recent payment that has one."""
        for payment in self.payments.order_by('-time_stamp'):
            try:
                if payment.creditcards.count() > 0:
                    return payment.creditcards.get()
            # BUG FIX: this previously read `payments.creditcards...`, an
            # undefined name that raised NameError whenever get() failed.
            except payment.creditcards.model.DoesNotExist:
                pass
        return None
    credit_card = property(_credit_card)
    def _full_bill_street(self, delim="\n"):
        """
        Return both billing street entries separated by delim.
        Note - Use linebreaksbr filter to convert to html in templates.
        """
        if self.bill_street2:
            address = self.bill_street1 + delim + self.bill_street2
        else:
            address = self.bill_street1
        return mark_safe(address)
    full_bill_street = property(_full_bill_street)
    def _full_ship_street(self, delim="\n"):
        """
        Return both shipping street entries separated by delim.
        Note - Use linebreaksbr filter to convert to html in templates.
        """
        if self.ship_street2:
            address = self.ship_street1 + delim + self.ship_street2
        else:
            address = self.ship_street1
        return mark_safe(address)
    full_ship_street = property(_full_ship_street)
    def _ship_country_name(self):
        """Full country name for the shipping address."""
        return Country.objects.get(iso2_code=self.ship_country).name
    ship_country_name = property(_ship_country_name)
    def _bill_country_name(self):
        """Full country name for the billing address."""
        return Country.objects.get(iso2_code=self.bill_country).name
    bill_country_name = property(_bill_country_name)
    def _get_balance_remaining_url(self):
        return ('satchmo_balance_remaining_order', None, {'order_id' : self.id})
    get_balance_remaining_url = models.permalink(_get_balance_remaining_url)
    def _partially_paid(self):
        """True once more than one cent has been paid."""
        return self.balance_paid > Decimal("0.01")
    partially_paid = property(_partially_paid)
    def _is_partially_paid(self):
        """True when the order has a total and a partial outstanding balance."""
        if self.total:
            balance = self.balance
            return float(balance) > Decimal("0.01") and self.balance != self.balance_paid
        else:
            return False
    is_partially_paid = property(fget=_is_partially_paid)
    def payments_completed(self):
        """Payments with a non-zero amount, excluding pending transactions."""
        q = self.payments.exclude(transaction_id__isnull = False, transaction_id = "PENDING")
        result = [p for p in q if p.amount]
        return result
    def save(self, force_insert=False, force_update=False):
        """
        Copy addresses from contact. If the order has just been created, set
        the create_date.
        """
        if not self.pk:
            self.time_stamp = datetime.datetime.now()
            self.copy_addresses()
        super(Order, self).save(force_insert=force_insert, force_update=force_update) # Call the "real" save() method.
    def invoice(self):
        """Admin helper: HTML link to the printable invoice."""
        url = urlresolvers.reverse('satchmo_print_shipping', None, None, {'doc' : 'invoice', 'id' : self.id})
        return mark_safe(u'<a href="%s">%s</a>' % (url, ugettext('View')))
    invoice.allow_tags = True
    def _item_discount(self):
        """Get the discount of just the items, less the shipping discount."""
        return self.discount-self.shipping_discount
    item_discount = property(_item_discount)
    def packingslip(self):
        """Admin helper: HTML link to the printable packing slip."""
        url = urlresolvers.reverse('satchmo_print_shipping', None, None, {'doc' : 'packingslip', 'id' : self.id})
        return mark_safe(u'<a href="%s">%s</a>' % (url, ugettext('View')))
    packingslip.allow_tags = True
    def recalculate_total(self, save=True):
        """Calculates sub_total, taxes and total if the order is not already partially paid."""
        if self.is_partially_paid:
            log.debug("Order %i - skipping recalculate_total since product is partially paid.", self.id)
        else:
            self.force_recalculate_total(save=save)
    def force_recalculate_total(self, save=True):
        """Calculates sub_total, taxes and total."""
        from satchmo.discount.utils import find_discount_for_code
        zero = Decimal("0.0000000000")
        # Apply any discount code to the order and its line items.
        discount = find_discount_for_code(self.discount_code)
        discount.calc(self)
        self.discount = discount.total
        discounts = discount.item_discounts
        itemprices = []
        fullprices = []
        for lineitem in self.orderitem_set.all():
            lid = lineitem.id
            if lid in discounts:
                lineitem.discount = discounts[lid]
            else:
                lineitem.discount = zero
            if save:
                lineitem.save()
            itemprices.append(lineitem.sub_total)
            fullprices.append(lineitem.line_item_price)
        # 'Shipping' is a special key in the discount map.
        if 'Shipping' in discounts:
            self.shipping_discount = discounts['Shipping']
        else:
            self.shipping_discount = zero
        if itemprices:
            item_sub_total = reduce(operator.add, itemprices)
        else:
            item_sub_total = zero
        if fullprices:
            full_sub_total = reduce(operator.add, fullprices)
        else:
            full_sub_total = zero
        # sub_total is the undiscounted item total; the discounted total
        # (item_sub_total) feeds into self.total below.
        self.sub_total = full_sub_total
        taxProcessor = get_tax_processor(self)
        totaltax, taxrates = taxProcessor.process()
        self.tax = totaltax
        # clear old taxes
        for taxdetl in self.taxes.all():
            taxdetl.delete()
        for taxdesc, taxamt in taxrates.items():
            taxdetl = OrderTaxDetail(order=self, tax=taxamt, description=taxdesc, method=taxProcessor.method)
            taxdetl.save()
        log.debug("Order #%i, recalc: sub_total=%s, shipping=%s, discount=%s, tax=%s",
            self.id,
            moneyfmt(item_sub_total),
            moneyfmt(self.shipping_sub_total),
            moneyfmt(self.discount),
            moneyfmt(self.tax))
        self.total = Decimal(item_sub_total + self.shipping_sub_total + self.tax)
        if save:
            self.save()
    def shippinglabel(self):
        """Admin helper: HTML link to the printable shipping label."""
        url = urlresolvers.reverse('satchmo_print_shipping', None, None, {'doc' : 'shippinglabel', 'id' : self.id})
        return mark_safe(u'<a href="%s">%s</a>' % (url, ugettext('View')))
    shippinglabel.allow_tags = True
    def _order_total(self):
        #Needed for the admin list display
        return moneyfmt(self.total)
    order_total = property(_order_total)
    def order_success(self):
        """Run each item's order_success method."""
        log.info("Order success: %s", self)
        for orderitem in self.orderitem_set.all():
            subtype = orderitem.product.get_subtype_with_attr('order_success')
            if subtype:
                subtype.order_success(self, orderitem)
        if self.is_downloadable:
            self.add_status('Shipped', ugettext("Order immediately available for download"))
        signals.order_success.send(self, order=self)
    def _paid_in_full(self):
        """True if total has been paid"""
        return self.balance < Decimal('0.01')
    paid_in_full = property(fget=_paid_in_full)
    def _has_downloads(self):
        """Determine if there are any downloadable products on this order"""
        if self.downloadlink_set.count() > 0:
            return True
        return False
    has_downloads = property(_has_downloads)
    def _is_downloadable(self):
        """Determine if all products on this order are downloadable"""
        for orderitem in self.orderitem_set.all():
            if not orderitem.product.is_downloadable:
                return False
        return True
    is_downloadable = property(_is_downloadable)
    def _is_shippable(self):
        """Determine if we will be shipping any items on this order """
        for orderitem in self.orderitem_set.all():
            if orderitem.is_shippable:
                return True
        return False
    is_shippable = property(_is_shippable)
    def _shipping_sub_total(self):
        """Shipping cost net of shipping discount; missing values default to 0."""
        if self.shipping_cost is None:
            self.shipping_cost = Decimal('0.00')
        if self.shipping_discount is None:
            self.shipping_discount = Decimal('0.00')
        return self.shipping_cost-self.shipping_discount
    shipping_sub_total = property(_shipping_sub_total)
    def _shipping_tax(self):
        """Total of the tax detail rows labelled 'shipping'."""
        rates = self.taxes.filter(description__iexact = 'shipping')
        if rates.count()>0:
            tax = reduce(operator.add, [t.tax for t in rates])
        else:
            tax = Decimal("0.0000000000")
        return tax
    shipping_tax = property(_shipping_tax)
    def _shipping_with_tax(self):
        """Net shipping plus shipping tax."""
        return self.shipping_sub_total + self.shipping_tax
    shipping_with_tax = property(_shipping_with_tax)
    def sub_total_with_tax(self):
        """Sum of every line item's discounted price plus its tax."""
        return reduce(operator.add, [o.total_with_tax for o in self.orderitem_set.all()])
    def validate(self, request):
        """
        Return whether the order is valid.
        Not guaranteed to be side-effect free.
        """
        valid = True
        for orderitem in self.orderitem_set.all():
            for subtype_name in orderitem.product.get_subtypes():
                subtype = getattr(orderitem.product, subtype_name.lower())
                validate_method = getattr(subtype, 'validate_order', None)
                if validate_method:
                    valid = valid and validate_method(request, self, orderitem)
        return valid
    class Meta:
        verbose_name = _("Product Order")
        verbose_name_plural = _("Product Orders")
class OrderItem(models.Model):
    """
    A line item on an order.
    """
    order = models.ForeignKey(Order, verbose_name=_("Order"))
    product = models.ForeignKey(Product, verbose_name=_("Product"))
    quantity = models.IntegerField(_("Quantity"), )
    unit_price = models.DecimalField(_("Unit price"),
        max_digits=18, decimal_places=10)
    unit_tax = models.DecimalField(_("Unit tax"),
        max_digits=18, decimal_places=10, null=True)
    line_item_price = models.DecimalField(_("Line item price"),
        max_digits=18, decimal_places=10)
    tax = models.DecimalField(_("Line item tax"),
        max_digits=18, decimal_places=10, null=True)
    expire_date = models.DateField(_("Subscription End"), help_text=_("Subscription expiration date."), blank=True, null=True)
    completed = models.BooleanField(_("Completed"), default=False)
    discount = models.DecimalField(_("Line item discount"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    notes = models.CharField(_("Notes"), blank=True, null=True, max_length=150)
    def __unicode__(self):
        return self.product.translated_name()
    def _get_category(self):
        """Translated name of the product's category."""
        return(self.product.get_category.translated_name())
    category = property(_get_category)
    def _is_shippable(self):
        """Whether this line's product requires shipping."""
        return self.product.is_shippable
    is_shippable = property(fget=_is_shippable)
    def _sub_total(self):
        """Line price after any line-level discount."""
        if self.discount:
            return self.line_item_price-self.discount
        else:
            return self.line_item_price
    sub_total = property(_sub_total)
    def _total_with_tax(self):
        """Discounted line price plus line tax.

        NOTE(review): raises TypeError while ``tax`` is still None --
        presumably only used after save()/update_tax(); confirm.
        """
        return self.sub_total + self.tax
    total_with_tax = property(_total_with_tax)
    def _unit_price_with_tax(self):
        """Per-unit price plus per-unit tax."""
        return self.unit_price + self.unit_tax
    unit_price_with_tax = property(_unit_price_with_tax)
    def _get_description(self):
        """Translated product name, for display."""
        return self.product.translated_name()
    description = property(_get_description)
    def save(self, force_insert=False, force_update=False):
        """Recompute taxes on every save before persisting."""
        self.update_tax()
        super(OrderItem, self).save(force_insert=force_insert, force_update=force_update)
    def update_tax(self):
        """Refresh unit_tax and line tax via the order's tax processor."""
        taxclass = self.product.taxClass
        processor = get_tax_processor(order=self.order)
        self.unit_tax = processor.by_price(taxclass, self.unit_price)
        self.tax = processor.by_orderitem(self)
    class Meta:
        verbose_name = _("Order Line Item")
        verbose_name_plural = _("Order Line Items")
        ordering = ('id',)
class OrderItemDetail(models.Model):
    """
    Name, value pair and price delta associated with a specific item in an order
    """
    item = models.ForeignKey(OrderItem, verbose_name=_("Order Item"), related_name='details',)
    name = models.CharField(_('Name'), max_length=100)
    value = models.CharField(_('Value'), max_length=255)
    price_change = models.DecimalField(_("Price Change"), max_digits=18, decimal_places=10, blank=True, null=True)
    sort_order = models.IntegerField(_("Sort Order"),
        help_text=_("The display order for this group."))
    def __unicode__(self):
        return u"%s - %s,%s" % (self.item, self.name, self.value)
    class Meta:
        verbose_name = _("Order Item Detail")
        verbose_name_plural = _("Order Item Details")
        ordering = ('sort_order',)
class DownloadLink(models.Model):
    """A time- and attempt-limited link to a purchased downloadable product."""
    downloadable_product = models.ForeignKey(DownloadableProduct, verbose_name=_('Downloadable product'))
    order = models.ForeignKey(Order, verbose_name=_('Order'))
    key = models.CharField(_('Key'), max_length=40)
    num_attempts = models.IntegerField(_('Number of attempts'), )
    time_stamp = models.DateTimeField(_('Time stamp'), )
    active = models.BooleanField(_('Active'), default=True)
    def _attempts_left(self):
        """Downloads remaining before the allowed count is exhausted."""
        return self.downloadable_product.num_allowed_downloads - self.num_attempts
    attempts_left = property(_attempts_left)
    def is_valid(self):
        """Return (ok, message); message explains the failure when not ok."""
        # Check num attempts and expire_minutes
        if not self.downloadable_product.active:
            return (False, _("This download is no longer active"))
        if self.num_attempts >= self.downloadable_product.num_allowed_downloads:
            return (False, _("You have exceeded the number of allowed downloads."))
        expire_time = datetime.timedelta(minutes=self.downloadable_product.expire_minutes) + self.time_stamp
        if datetime.datetime.now() > expire_time:
            return (False, _("This download link has expired."))
        return (True, "")
    def get_absolute_url(self):
        return('satchmo.shop.views.download.process', (), { 'download_key': self.key})
    get_absolute_url = models.permalink(get_absolute_url)
    def get_full_url(self):
        """Absolute download URL on the current site."""
        url = urlresolvers.reverse('satchmo_download_process', kwargs= {'download_key': self.key})
        return('http://%s%s' % (Site.objects.get_current(), url))
    def save(self, force_insert=False, force_update=False):
        """
        Set the initial time stamp
        """
        if self.time_stamp is None:
            self.time_stamp = datetime.datetime.now()
        super(DownloadLink, self).save(force_insert=force_insert, force_update=force_update)
    def __unicode__(self):
        return u"%s - %s" % (self.downloadable_product.product.slug, self.time_stamp)
    def _product_name(self):
        """Translated name of the underlying product."""
        return u"%s" % (self.downloadable_product.product.translated_name())
    product_name=property(_product_name)
    class Meta:
        verbose_name = _("Download Link")
        verbose_name_plural = _("Download Links")
class OrderStatus(models.Model):
    """
    An order will have multiple statuses as it moves its way through processing.
    """
    order = models.ForeignKey(Order, verbose_name=_("Order"))
    status = models.CharField(_("Status"),
        max_length=20, choices=ORDER_STATUS, blank=True)
    notes = models.CharField(_("Notes"), max_length=100, blank=True)
    time_stamp = models.DateTimeField(_("Timestamp"))
    def __unicode__(self):
        return self.status
    def save(self, force_insert=False, force_update=False):
        """Persist this entry, then mirror the status onto the parent order."""
        super(OrderStatus, self).save(force_insert=force_insert, force_update=force_update)
        self.order.status = self.status
        self.order.save()
    class Meta:
        verbose_name = _("Order Status")
        verbose_name_plural = _("Order Statuses")
        ordering = ('time_stamp',)
class OrderPayment(models.Model):
    """A single payment applied against an order."""
    order = models.ForeignKey(Order, related_name="payments")
    payment = PaymentChoiceCharField(_("Payment Method"),
        max_length=25, blank=True)
    amount = models.DecimalField(_("amount"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    time_stamp = models.DateTimeField(_("timestamp"), blank=True, null=True)
    transaction_id = models.CharField(_("Transaction ID"), max_length=25, blank=True, null=True)
    def _credit_card(self):
        """Return the credit card associated with this payment."""
        try:
            return self.creditcards.get()
        except self.creditcards.model.DoesNotExist:
            return None
    credit_card = property(_credit_card)
    def _amount_total(self):
        """Money-formatted payment amount."""
        return moneyfmt(self.amount)
    amount_total = property(fget=_amount_total)
    def __unicode__(self):
        if self.id is not None:
            return u"Order payment #%i" % self.id
        else:
            return u"Order payment (unsaved)"
    def save(self, force_insert=False, force_update=False):
        """Stamp time_stamp on the first save."""
        if not self.pk:
            self.time_stamp = datetime.datetime.now()
        super(OrderPayment, self).save(force_insert=force_insert, force_update=force_update)
    def getPaymentMethodConfig(self):
        """Return the config group for this method ('PAYMENT_' + method)."""
        payment_method = self.payment.encode()
        return config_get_group('PAYMENT_' + payment_method)
    class Meta:
        verbose_name = _("Order Payment")
        verbose_name_plural = _("Order Payments")
class OrderVariable(models.Model):
    """Small free-form key/value annotation attached to an order."""
    order = models.ForeignKey(Order, related_name="variables")
    key = models.SlugField(_('key'), )
    value = models.CharField(_('value'), max_length=100)
    class Meta:
        ordering=('key',)
        verbose_name = _("Order variable")
        verbose_name_plural = _("Order variables")
    def __unicode__(self):
        # Truncate long values for display.
        if len(self.value)>10:
            v = self.value[:10] + '...'
        else:
            v = self.value
        return u"OrderVariable: %s=%s" % (self.key, v)
class OrderTaxDetail(models.Model):
    """A tax line item"""
    order = models.ForeignKey(Order, related_name="taxes")
    # Name of the tax processor that produced this row.
    method = models.CharField(_("Model"), max_length=50, )
    description = models.CharField(_("Description"), max_length=50, blank=True)
    tax = models.DecimalField(_("Tax"),
        max_digits=18, decimal_places=10, blank=True, null=True)
    def __unicode__(self):
        if self.description:
            return u"Tax: %s %s" % (self.description, self.tax)
        else:
            return u"Tax: %s" % self.tax
    class Meta:
        verbose_name = _('Order tax detail')
        verbose_name_plural = _('Order tax details')
        ordering = ('id',)
def _remove_order_on_cart_update(request=None, cart=None, **kwargs):
if request:
log.debug("caught cart changed signal - remove_order_on_cart_update")
Order.objects.remove_partial_order(request)
def _recalc_total_on_contact_change(contact=None, **kwargs):
    """Signal listener: refresh addresses and totals on the contact's
    not-yet-in-process orders."""
    #TODO: pull just the current order once we start using threadlocal middleware
    log.debug("Recalculating all contact orders not in process")
    pending = Order.objects.filter(contact=contact, status="")
    log.debug("Found %i orders to recalc", pending.count())
    for pending_order in pending:
        pending_order.copy_addresses()
        pending_order.recalculate_total()
def _create_download_link(product=None, order=None, subtype=None, **kwargs):
    """Signal listener: create a DownloadLink when a downloadable product
    is successfully ordered."""
    if not (product and order and subtype == "download"):
        log.debug("ignoring subtype_order_success signal, looking for download product, got %s", subtype)
        return
    new_link = DownloadLink(downloadable_product=product, order=order, key=product.create_key(), num_attempts=0)
    new_link.save()
# Wire up the module-level signal listeners defined above.
signals.satchmo_cart_changed.connect(_remove_order_on_cart_update, sender=None)
satchmo_contact_location_changed.connect(_recalc_total_on_contact_change, sender=None)
signals.order_success.connect(notification.order_success_listener, sender=None)
product_signals.subtype_order_success.connect(_create_download_link, sender=None)
# Imported for its side effect of registering this app's configuration.
import config
|
{
"content_hash": "810a7787f35531947a4382fe6c82ae4a",
"timestamp": "",
"source": "github",
"line_count": 1187,
"max_line_length": 152,
"avg_line_length": 37.61246840775063,
"alnum_prop": 0.6192940017022801,
"repo_name": "sankroh/satchmo",
"id": "409a1d1dbaf177e6e0b73d5f5648c5745f7524a1",
"size": "44646",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "satchmo/shop/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import signal
import threading
import time
import uuid
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import builtins
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.compute import cpumodel
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import volume as volume_drivers
libvirt_driver.libvirt = fakelibvirt
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('instances_path', 'nova.compute.manager')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>"""}
_fake_cpu_info = {
"arch": "test_arch",
"model": "test_model",
"vendor": "test_vendor",
"topology": {
"sockets": 1,
"cores": 8,
"threads": 16
},
"features": ["feature1", "feature2"]
}
def _concurrency(signal, wait, done, target, is_block_dev=False):
    """Fake image-fetch callable used to probe cache() concurrency.

    Announces that it started (``signal``), blocks until the test releases
    it (``wait``), then reports completion (``done``).  ``target`` and
    ``is_block_dev`` only exist to match the signature the imagebackend
    cache passes to its fetch function; they are ignored.
    """
    signal.send()
    wait.wait()
    done.send()
class FakeVirDomainSnapshot(object):
    """Minimal stand-in for a libvirt virDomainSnapshot object."""
    def __init__(self, dom=None):
        # Remember which (fake) domain this snapshot belongs to.
        self.dom = dom
    def delete(self, flags):
        """No-op replacement for virDomainSnapshot.delete()."""
        pass
class FakeVirtDomain(object):
    """In-memory fake of a libvirt virDomain for driver tests.

    Only the virDomain methods the tests exercise are implemented; all
    mutating calls are no-ops.  ``XMLDesc`` returns either the XML passed
    at construction time or a minimal single-disk KVM domain.
    """
    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
        if uuidstr is None:
            uuidstr = str(uuid.uuid4())
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        # Shaped like virDomain.info(): [state, maxMem, memory, nrVirtCpu,
        # cpuTime]; the last two are unused by these tests.
        self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
                     None, None]
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """
    def name(self):
        # Fall back to a repr-based name when none was supplied.
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname
    def ID(self):
        return self.id
    def info(self):
        return self._info
    def create(self):
        pass
    def managedSave(self, *args):
        pass
    def createWithFlags(self, launch_flags):
        pass
    def XMLDesc(self, flags):
        return self._fake_dom_xml
    def UUIDString(self):
        return self.uuidstr
    def attachDeviceFlags(self, xml, flags):
        pass
    def attachDevice(self, xml):
        pass
    def detachDeviceFlags(self, xml, flags):
        pass
    def snapshotCreateXML(self, xml, flags):
        pass
    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass
    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass
    def blockJobInfo(self, path, flags):
        pass
    def resume(self):
        pass
    def destroy(self):
        pass
    def fsFreeze(self, disks=None, flags=0):
        pass
    def fsThaw(self, disks=None, flags=0):
        pass
class CacheConcurrencyTestCase(test.NoDBTestCase):
    """Concurrency behaviour of the imagebackend cache.

    Cache fills targeting the same filename must serialize; fills for
    different filenames must be able to proceed concurrently.
    """
    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)
        def fake_exists(fname):
            # Only the cache basedir and the lock dir "exist", so every
            # cache() call takes the fetch path.
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False
        def fake_execute(*args, **kwargs):
            pass
        def fake_extend(image, size, use_cow=False):
            pass
        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
    def _fake_instance(self, uuid):
        # Minimal Instance; the backend only needs id and uuid.
        return objects.Instance(id=1, uuid=uuid)
    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs at a sequentially.
        uuid = uuidutils.generate_uuid()
        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname', None,
                signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname', None,
                signal=sig2, wait=wait2, done=done2)
        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 1 still holds the per-fname lock, so thread 2 must
            # not have completed yet.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()
        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname2', None,
                signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                _concurrency, 'fname1', None,
                signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()
        wait2.send()
        tries = 0
        # Thread 2 targets a different fname, so it should finish even
        # while thread 1 is still blocked; poll a bounded number of times.
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
class FakeVolumeDriver(object):
    """No-op libvirt volume driver used to stub volume attach paths."""
    def __init__(self, *args, **kwargs):
        pass
    def attach_volume(self, *args):
        pass
    def detach_volume(self, *args):
        pass
    def get_xml(self, *args):
        return ""
    def get_config(self, *args):
        """Connect the volume to a fake device."""
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        # All remaining fields carry the same placeholder value.
        for attr in ("source_protocol", "source_name",
                     "target_dev", "target_bus"):
            setattr(conf, attr, "fake")
        return conf
    def connect_volume(self, *args):
        """Connect the volume to a fake device."""
        return self.get_config()
class FakeConfigGuestDisk(object):
    """Bare-bones stand-in for LibvirtConfigGuestDisk.

    Only the two attributes the tests inspect are provided; both start
    out unset, mirroring a freshly-built config object.
    """
    def __init__(self, *args, **kwargs):
        self.source_type = None
        self.driver_cache = None
class FakeConfigGuest(object):
    """Bare-bones stand-in for a guest config; exposes only the
    ``driver_cache`` attribute the tests read, initially unset."""
    def __init__(self, *args, **kwargs):
        self.driver_cache = None
class FakeNodeDevice(object):
    """Fake libvirt node device returning canned XML from XMLDesc()."""
    def __init__(self, fakexml):
        self.xml = fakexml
    def XMLDesc(self, flags):
        # ``flags`` is accepted for signature compatibility and ignored.
        return self.xml
def _create_test_instance():
    """Return a dict of instance attributes shared by the driver tests.

    The dict is shaped like the kwargs accepted by objects.Instance and
    includes an attached m1.small-like Flavor object.  Callers typically
    deep-copy and tweak individual fields.
    """
    flavor = objects.Flavor(memory_mb=2048,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=1,
                            id=2,
                            name=u'm1.small',
                            ephemeral_gb=0,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=1,
                            extra_specs={})
    return {
        'id': 1,
        'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {
            'image_disk_format': 'raw',
        },
        'flavor': flavor,
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
    }
class LibvirtConnTestCase(test.NoDBTestCase):
REQUIRES_LOCKING = True
_EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' %
utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7])
    def setUp(self):
        """Prepare a fake libvirt environment shared by every test.

        Points instance/snapshot paths at a temp dir, swaps in fake
        libvirt utils and a fake libvirt connection fixture, stubs the
        image service, and builds a reusable test instance/image-meta.
        """
        super(LibvirtConnTestCase, self).setUp()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.flags(sysinfo_serial="hardware", group="libvirt")
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        def fake_extend(image, size, use_cow=False):
            pass
        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
        # Avoid the qemu-img probe normally done to resolve 'default'.
        self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                       imagebackend.Image._get_driver_format)
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.test_image_meta = {
            "disk_format": "raw",
        }
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
                self.stubs)
        # Template for a block-device disk element; tests fill in
        # {device_path} per scenario.
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
    def relpath(self, path):
        """Return ``path`` relative to the configured instances_path."""
        return os.path.relpath(path, CONF.instances_path)
    def tearDown(self):
        """Reset the stubbed fake image service between tests."""
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
    def test_driver_capabilities(self):
        """The driver advertises image-cache and recreate support but not
        same-host migration."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['has_imagecache'],
                        'Driver capabilities for \'has_imagecache\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_recreate'],
                        'Driver capabilities for \'supports_recreate\' '
                        'is invalid')
        self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
                         'Driver capabilities for '
                         '\'supports_migrate_to_same_host\' is invalid')
def create_fake_libvirt_mock(self, **kwargs):
"""Defining mocks for LibvirtDriver(libvirt is not used)."""
# A fake libvirt.virConnect
class FakeLibvirtDriver(object):
def defineXML(self, xml):
return FakeVirtDomain()
# Creating mocks
volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver'
'.FakeVolumeDriver']
fake = FakeLibvirtDriver()
# Customizing above fake if necessary
for key, val in kwargs.items():
fake.__setattr__(key, val)
self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers',
lambda x: volume_driver)
self.stubs.Set(host.Host, 'get_connection', lambda x: fake)
    def fake_lookup(self, instance_name):
        """Domain-lookup stub: returns a fresh FakeVirtDomain for any
        instance name."""
        return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
    def _create_service(self, **kwargs):
        """Build a nova-compute Service object.

        ``host`` (default 'dummy') and ``disabled`` (default False) may be
        overridden via keyword arguments.
        """
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'disabled': kwargs.get('disabled', False),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0}
        return objects.Service(**service_ref)
    def _get_pause_flag(self, drvr, network_info, power_on=True,
                        vifs_already_plugged=False):
        """Mirror the driver's decision about starting the guest paused.

        Returns True only when neutron VIF-plugged events are expected,
        i.e. the connection supports paused start, neutron is in use, the
        VIFs are not yet plugged, the guest will be powered on, and a
        vif_plugging_timeout is configured.
        """
        timeout = CONF.vif_plugging_timeout
        events = []
        if (drvr._conn_supports_start_paused and
            utils.is_neutron() and
            not vifs_already_plugged and
            power_on and timeout):
            events = drvr._get_neutron_events(network_info)
        return bool(events)
    def test_public_api_signatures(self):
        """LibvirtDriver's public methods match the ComputeDriver base
        class signatures."""
        baseinst = driver.ComputeDriver(None)
        inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertPublicAPISignatures(baseinst, inst)
    def test_legacy_block_device_info(self):
        """The driver uses the new-style block_device_info format."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr.need_legacy_block_device_info)
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        """init_host succeeds when the libvirt version check passes."""
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        """init_host aborts when the libvirt version check fails."""
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_deprecation_warning(self, mock_warning,
                                                  mock_get_libversion):
        # Test that a warning is logged if the libvirt version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': '0.10.2'}
        version_arg_found = False
        # Scan every warning call for the version interpolation argument.
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_LIBVIRT_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': '0.10.2'}
        version_arg_found = False
        # No warning call should carry the deprecation version argument.
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_S390_VERSION))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_libvirt(self, mock_arch,
                                          mock_qemu_version, mock_lv_version):
        """init_host fails on s390x with a too-old libvirt."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_S390_VERSION) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_qemu(self, mock_arch,
                                       mock_qemu_version, mock_lv_version):
        """init_host fails on s390x with a too-old QEMU."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_S390_VERSION))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_ok(self, mock_arch,
                                 mock_qemu_version, mock_lv_version):
        """init_host succeeds on s390x when both versions are sufficient."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password(self, mock_get_guest, ver, mock_image):
        """Linux guests default the admin account to 'root'."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")
        mock_guest.set_user_password.assert_called_once_with("root", "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image):
        """Windows guests default the admin account to 'Administrator'."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.os_type = "windows"
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")
        mock_guest.set_user_password.assert_called_once_with(
            "Administrator", "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
        """The image's os_admin_user property overrides the default
        admin account name."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes",
            "os_admin_user": "foo"
        }}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")
        mock_guest.set_user_password.assert_called_once_with("foo", "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=False)
    def test_set_admin_password_bad_version(self, mock_svc, mock_image):
        """set_admin_password is rejected when libvirt is too old."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
        """set_admin_password is rejected on unsupported virt types."""
        self.flags(virt_type='foo', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_guest_agent_not_running(self, mock_svc):
        """Without the qemu guest agent image property the call fails.

        Note: no image metadata is mocked here, so the agent is treated
        as not enabled.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          drvr.set_admin_password, instance, "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        """A libvirt error from the guest agent surfaces as NovaException."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc):
        # Tests enabling a disabled host.
        # NOTE(review): the assertion below expects the service to REMAIN
        # disabled after _set_host_enabled(True) -- presumably because the
        # driver refuses to override a manual (non-automatic) disable;
        # confirm against _set_host_enabled's implementation.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertTrue(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
        # Tests enabling an enabled host; it must stay enabled.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
        # Tests disabling a disabled host; it must stay disabled.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)
    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
            db_mock.side_effect = exception.NovaException
            # Must not raise despite the DB error.
            drvr._set_host_enabled(False)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    def test_prepare_pci_device(self, mock_lookup):
        """Preparing a PCI device for use succeeds when lookup works."""
        pci_devices = [dict(hypervisor_name='xxx')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        drvr._prepare_pci_devices_for_use(pci_devices)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
    def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
        """A libvirt dettach failure maps to PciDevicePrepareFailed."""
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")
        self.assertRaises(exception.PciDevicePrepareFailed,
                          drvr._prepare_pci_devices_for_use, pci_devices)
    def test_detach_pci_devices_exception(self):
        """Detach fails with PciDeviceDetachFailed when the libvirt
        version check fails."""
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: False
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, None, pci_devices)
def test_detach_pci_devices(self):
fake_domXML1 =\
"""<domain> <devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='xxx'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address function="0x1" slot="0x10" domain="0x0000"
bus="0x04"/>
</source>
</hostdev></devices></domain>"""
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid',
address="0001:04:10:1")]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(host.Host,
'has_min_version')
host.Host.has_min_version = lambda x, y: True
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_get_guest_pci_device')
class FakeDev(object):
def to_xml(self):
pass
libvirt_driver.LibvirtDriver._get_guest_pci_device =\
lambda x, y: FakeDev()
class FakeDomain(object):
def detachDeviceFlags(self, xml, flags):
pci_devices[0]['hypervisor_name'] = 'marked'
pass
def XMLDesc(self, flags):
return fake_domXML1
guest = libvirt_guest.Guest(FakeDomain())
drvr._detach_pci_devices(guest, pci_devices)
self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
    def test_detach_pci_devices_timeout(self):
        """Detach raises PciDeviceDetachFailed when the device address
        keeps showing up in the domain XML (device never disappears)."""
        fake_domXML1 =\
            """<domain>
                <devices>
                  <hostdev mode="subsystem" type="pci" managed="yes">
                    <source>
            <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
                    </source>
                  </hostdev>
                </devices>
            </domain>"""
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid',
                            address="0000:04:10:1")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: True
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_get_guest_pci_device')
        class FakeDev(object):
            def to_xml(self):
                pass
        libvirt_driver.LibvirtDriver._get_guest_pci_device =\
            lambda x, y: FakeDev()
        class FakeDomain(object):
            def detachDeviceFlags(self, xml, flags):
                # Intentionally a no-op: the hostdev stays in the XML, so
                # the driver's removal wait must time out.
                pass
            def XMLDesc(self, flags):
                return fake_domXML1
        guest = libvirt_guest.Guest(FakeDomain())
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, guest, pci_devices)
@mock.patch.object(connector, 'get_connector_properties')
def test_get_connector(self, fake_get_connector):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
wwpns = ['100010604b019419']
wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
expected = {
'ip': ip,
'initiator': initiator,
'host': host,
'wwpns': wwpns,
'wwnns': wwnns
}
volume = {
'id': 'fake'
}
# TODO(walter-boring) add the fake in os-brick
fake_get_connector.return_value = expected
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
result = drvr.get_volume_connector(volume)
self.assertThat(expected, matchers.DictMatches(result))
    @mock.patch.object(connector, 'get_connector_properties')
    def test_get_connector_storage_ip(self, fake_get_connector):
        """my_block_storage_ip takes precedence over my_ip in the
        connector properties."""
        ip = '100.100.100.100'
        storage_ip = '101.101.101.101'
        self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
        volume = {
            'id': 'fake'
        }
        expected = {
            'ip': storage_ip
        }
        # TODO(walter-boring) add the fake in os-brick
        fake_get_connector.return_value = expected
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        result = drvr.get_volume_connector(volume)
        self.assertEqual(storage_ip, result['ip'])
    def test_lifecycle_event_registration(self):
        """init_host must install the libvirt error handler before it
        first queries host capabilities."""
        calls = []
        def fake_registerErrorHandler(*args, **kwargs):
            calls.append('fake_registerErrorHandler')
        def fake_get_host_capabilities(**args):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            calls.append('fake_get_host_capabilities')
            return caps
        @mock.patch.object(fakelibvirt, 'registerErrorHandler',
                           side_effect=fake_registerErrorHandler)
        @mock.patch.object(host.Host, "get_capabilities",
                           side_effect=fake_get_host_capabilities)
        def test_init_host(get_host_capabilities, register_error_handler):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            drvr.init_host("test_host")
        test_init_host()
        # NOTE(dkliban): Will fail if get_host_capabilities is called before
        # registerErrorHandler
        self.assertEqual(['fake_registerErrorHandler',
                          'fake_get_host_capabilities'], calls)
    def test_sanitize_log_to_xml(self):
        """Passwords in block_device_info must be scrubbed before they
        are logged by _get_guest_xml."""
        # setup fake data
        data = {'auth_password': 'scrubme'}
        bdm = [{'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}
        # Tests that the parameters to the _get_guest_xml method
        # are sanitized for passwords when logged.
        def fake_debug(*args, **kwargs):
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = mock.Mock()
        with test.nested(
            mock.patch.object(libvirt_driver.LOG, 'debug',
                              side_effect=fake_debug),
            mock.patch.object(drvr, '_get_guest_config', return_value=conf)
        ) as (
            debug_mock, conf_mock
        ):
            drvr._get_guest_xml(self.context, self.test_instance,
                                network_info={}, disk_info={},
                                image_meta={}, block_device_info=bdi)
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
    @mock.patch.object(time, "time")
    def test_get_guest_config(self, time_mock):
        """Exercise the full guest config for a plain KVM instance:
        features, memory/vcpus, boot device, the expected 10 devices, and
        the Nova instance metadata element (owner, flavor, timestamps)."""
        time_mock.return_value = 1234567.89
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["display_name"] = "purple tomatoes"
        ctxt = context.RequestContext(project_id=123,
                                      project_name="aubergine",
                                      user_id=456,
                                      user_name="pie")
        flavor = objects.Flavor(name='m1.small',
                                memory_mb=6,
                                vcpus=28,
                                root_gb=496,
                                ephemeral_gb=8128,
                                swap=33550336,
                                extra_specs={})
        instance_ref = objects.Instance(**test_instance)
        instance_ref.flavor = flavor
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info,
                                     context=ctxt)
        self.assertEqual(cfg.uuid, instance_ref["uuid"])
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 6 * units.Ki)
        self.assertEqual(cfg.vcpus, 28)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        # Expected layout: 3 disks, 1 NIC, 2 serials, input, graphics,
        # video, memballoon.
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(len(cfg.metadata), 1)
        self.assertIsInstance(cfg.metadata[0],
                              vconfig.LibvirtConfigGuestMetaNovaInstance)
        self.assertEqual(version.version_string_with_package(),
                         cfg.metadata[0].package)
        self.assertEqual("purple tomatoes",
                         cfg.metadata[0].name)
        self.assertEqual(1234567.89,
                         cfg.metadata[0].creationTime)
        self.assertEqual("image",
                         cfg.metadata[0].roottype)
        self.assertEqual(str(instance_ref["image_ref"]),
                         cfg.metadata[0].rootid)
        self.assertIsInstance(cfg.metadata[0].owner,
                              vconfig.LibvirtConfigGuestMetaNovaOwner)
        self.assertEqual(456,
                         cfg.metadata[0].owner.userid)
        self.assertEqual("pie",
                         cfg.metadata[0].owner.username)
        self.assertEqual(123,
                         cfg.metadata[0].owner.projectid)
        self.assertEqual("aubergine",
                         cfg.metadata[0].owner.projectname)
        self.assertIsInstance(cfg.metadata[0].flavor,
                              vconfig.LibvirtConfigGuestMetaNovaFlavor)
        self.assertEqual("m1.small",
                         cfg.metadata[0].flavor.name)
        self.assertEqual(6,
                         cfg.metadata[0].flavor.memory)
        self.assertEqual(28,
                         cfg.metadata[0].flavor.vcpus)
        self.assertEqual(496,
                         cfg.metadata[0].flavor.disk)
        self.assertEqual(8128,
                         cfg.metadata[0].flavor.ephemeral)
        self.assertEqual(33550336,
                         cfg.metadata[0].flavor.swap)
    def test_get_guest_config_lxc(self):
        """LXC guests use EXE mode with /sbin/init and have the reduced
        device set: filesystem, interface and console."""
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
    def test_get_guest_config_lxc_with_id_maps(self):
        """Configured uid/gid maps produce UID and GID idmap entries in
        the LXC guest config."""
        self.flags(virt_type='lxc', group='libvirt')
        self.flags(uid_maps=['0:1000:100'], group='libvirt')
        self.flags(gid_maps=['0:1000:100'], group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual(len(cfg.idmaps), 2)
        self.assertIsInstance(cfg.idmaps[0],
                              vconfig.LibvirtConfigGuestUIDMap)
        self.assertIsInstance(cfg.idmaps[1],
                              vconfig.LibvirtConfigGuestGIDMap)
def test_get_guest_config_numa_host_instance_fits(self):
    """When the instance fits inside a single host NUMA cell, the
    guest config gets no cpuset pinning and no CPU NUMA element."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor = objects.Flavor(memory_mb=1, vcpus=2,
                                         root_gb=496, ephemeral_gb=8128,
                                         swap=33550336, name='fake',
                                         extra_specs={})
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    host_caps = vconfig.LibvirtConfigCaps()
    host_caps.host = vconfig.LibvirtConfigCapsHost()
    host_caps.host.cpu = vconfig.LibvirtConfigCPU()
    host_caps.host.cpu.arch = "x86_64"
    host_caps.host.topology = self._fake_caps_numa_topology()

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    with test.nested(
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=host_caps)):
        cfg = drv._get_guest_config(instance_ref, [], image_meta,
                                    disk_info)
        # Everything fits on one cell, so nothing needs to be pinned.
        self.assertIsNone(cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        self.assertIsNone(cfg.cpu.numa)
def test_get_guest_config_numa_host_instance_no_fit(self):
    """When the instance cannot fit in any single host NUMA cell, the
    guest is confined to the vcpu_pin_set cpuset with no guest NUMA
    config and no random cell choice."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor = objects.Flavor(memory_mb=4096, vcpus=4,
                                         root_gb=496, ephemeral_gb=8128,
                                         swap=33550336, name='fake',
                                         extra_specs={})
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    host_caps = vconfig.LibvirtConfigCaps()
    host_caps.host = vconfig.LibvirtConfigCapsHost()
    host_caps.host.cpu = vconfig.LibvirtConfigCPU()
    host_caps.host.cpu.arch = "x86_64"
    host_caps.host.topology = self._fake_caps_numa_topology()

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    with test.nested(
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=host_caps),
            mock.patch.object(hardware, 'get_vcpu_pin_set',
                              return_value=set([3])),
            mock.patch.object(random, 'choice')
    ) as (_caps_mock, _pin_set_mock, choice_mock):
        cfg = drv._get_guest_config(instance_ref, [], image_meta,
                                    disk_info)
        # No random cell selection should have been needed.
        self.assertFalse(choice_mock.called)
        self.assertEqual(set([3]), cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
        self, host_topology, inst_topology, numatune):
    """Helper: call _get_guest_memory_backing_config with the host
    NUMA topology stubbed out, returning the resulting config."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    topo_patch = mock.patch.object(drvr, "_get_host_numa_topology",
                                   return_value=host_topology)
    with topo_patch:
        return drvr._get_guest_memory_backing_config(inst_topology,
                                                     numatune)
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
def test_get_guest_memory_backing_config_large_success(self, mock_version):
    """A 2048K instance pagesize yields a single hugepages element
    targeting guest cell 0."""
    pages = [objects.NUMAPagesTopology(size_kb=4, total=2000, used=0),
             objects.NUMAPagesTopology(size_kb=2048, total=512, used=0),
             objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                       used=0)]
    host_topology = objects.NUMATopology(cells=[
        objects.NUMACell(id=3, cpuset=set([1]), memory=1024,
                         mempages=pages)])
    inst_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=3, cpuset=set([0, 1]), memory=1024,
                                 pagesize=2048)])

    # numatune maps guest cell 0 onto host cell 3.
    memnode = vconfig.LibvirtConfigGuestNUMATuneMemNode()
    memnode.cellid = 0
    memnode.nodeset = [3]
    numa_tune = vconfig.LibvirtConfigGuestNUMATune()
    numa_tune.memnodes = [memnode]

    result = self._test_get_guest_memory_backing_config(
        host_topology, inst_topology, numa_tune)
    self.assertEqual(1, len(result.hugepages))
    self.assertEqual(2048, result.hugepages[0].size_kb)
    self.assertEqual([0], result.hugepages[0].nodeset)
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
def test_get_guest_memory_backing_config_smallest(self, mock_version):
    """Requesting the smallest (4K) pagesize produces no memory
    backing config at all."""
    pages = [objects.NUMAPagesTopology(size_kb=4, total=2000, used=0),
             objects.NUMAPagesTopology(size_kb=2048, total=512, used=0),
             objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                       used=0)]
    host_topology = objects.NUMATopology(cells=[
        objects.NUMACell(id=3, cpuset=set([1]), memory=1024,
                         mempages=pages)])
    inst_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=3, cpuset=set([0, 1]), memory=1024,
                                 pagesize=4)])

    memnode = vconfig.LibvirtConfigGuestNUMATuneMemNode()
    memnode.cellid = 0
    memnode.nodeset = [3]
    numa_tune = vconfig.LibvirtConfigGuestNUMATune()
    numa_tune.memnodes = [memnode]

    result = self._test_get_guest_memory_backing_config(
        host_topology, inst_topology, numa_tune)
    self.assertIsNone(result)
def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
    """A PCI device with no NUMA affinity info (numa_node=None) does
    not trigger NUMA placement: the guest is simply confined to the
    vcpu_pin_set cpuset with no per-vcpu pinning or guest NUMA config.
    """
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    # Fake PCI device assigned to the instance with unknown NUMA
    # affinity (numa_node=None).
    pci_device_info = dict(test_pci_device.fake_db_dev)
    pci_device_info.update(compute_node_id=1,
                           label='fake',
                           status=fields.PciDeviceStatus.AVAILABLE,
                           address='0000:00:00.1',
                           instance_uuid=None,
                           request_id=None,
                           extra_info={},
                           numa_node=None)
    pci_device = objects.PciDevice(**pci_device_info)

    with test.nested(
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(
                host.Host, "get_capabilities", return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set', return_value=set([3])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(range(8))),
            mock.patch.object(pci_manager, "get_instance_pci_devs",
                              return_value=[pci_device])):
        cfg = conn._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        # Only the overall cpuset follows the pin set; no per-vcpu
        # pinning and no guest NUMA topology is generated.
        self.assertEqual(set([3]), cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        self.assertIsNone(cfg.cpu.numa)
def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
    """Two PCI devices on different host NUMA nodes: no single cell
    can satisfy both, so placement falls back to the vcpu_pin_set
    without consulting random.choice.
    """
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology()

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    # First fake device lives on host NUMA node 1 ...
    pci_device_info = dict(test_pci_device.fake_db_dev)
    pci_device_info.update(compute_node_id=1,
                           label='fake',
                           status=fields.PciDeviceStatus.AVAILABLE,
                           address='0000:00:00.1',
                           instance_uuid=None,
                           request_id=None,
                           extra_info={},
                           numa_node=1)
    pci_device = objects.PciDevice(**pci_device_info)
    # ... second one on node 0, so no single cell fits both.
    pci_device_info.update(numa_node=0, address='0000:00:00.2')
    pci_device2 = objects.PciDevice(**pci_device_info)
    with test.nested(
            mock.patch.object(
                host.Host, "get_capabilities", return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set', return_value=set([3])),
            mock.patch.object(random, 'choice'),
            mock.patch.object(pci_manager, "get_instance_pci_devs",
                              return_value=[pci_device, pci_device2])
    ) as (get_host_cap_mock,
          get_vcpu_pin_set_mock, choice_mock, pci_mock):
        cfg = conn._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        # random.choice must not have been used to pick a cell.
        self.assertFalse(choice_mock.called)
        self.assertEqual(set([3]), cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
@mock.patch.object(host.Host, 'get_capabilities')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
                                            fake_version, fake_type,
                                            fake_arch, exception_class,
                                            pagesize, mock_host,
                                            mock_caps, mock_lib_version,
                                            mock_version, mock_type):
    """Helper: assert that _get_guest_config raises ``exception_class``
    for an instance with a NUMA topology (and optional hugepage
    ``pagesize``) on the given fake hypervisor type/version/arch.

    The mock_* parameters are appended by the decorators after the
    caller-supplied arguments, bottom-most decorator first.
    """
    instance_topology = objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            id=0, cpuset=set([0]),
            memory=1024, pagesize=pagesize)])
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = fake_arch
    caps.host.topology = self._fake_caps_numa_topology()

    # Make the fake libvirt connection report the requested hypervisor
    # type and version combination.
    mock_type.return_value = fake_type
    mock_version.return_value = fake_version
    mock_lib_version.return_value = fake_lib_version
    mock_caps.return_value = caps

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    self.assertRaises(exception_class,
                      drvr._get_guest_config,
                      instance_ref, [],
                      image_meta, disk_info)
def test_get_guest_config_numa_old_version_libvirt(self):
    """libvirt older than the NUMA minimum must be rejected."""
    self.flags(virt_type='kvm', group='libvirt')
    libvirt_too_old = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1
    qemu_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    self._test_get_guest_config_numa_unsupported(
        libvirt_too_old, qemu_supported, host.HV_DRIVER_QEMU,
        arch.X86_64, exception.NUMATopologyUnsupported, None)
def test_get_guest_config_numa_bad_version_libvirt(self):
    """A known-broken libvirt NUMA version must be rejected."""
    self.flags(virt_type='kvm', group='libvirt')
    libvirt_bad = utils.convert_version_to_int(
        libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0])
    qemu_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    self._test_get_guest_config_numa_unsupported(
        libvirt_bad, qemu_supported, host.HV_DRIVER_QEMU,
        arch.X86_64, exception.NUMATopologyUnsupported, None)
@mock.patch.object(libvirt_driver.LOG, 'warn')
def test_has_numa_support_bad_version_libvirt_log(self, mock_warn):
    """Tests that a warning is logged once and only once when there is
    a bad BAD_LIBVIRT_NUMA_VERSIONS detected.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn'))
    with mock.patch.object(drvr._host, 'has_version', return_value=True):
        # Call twice: the warning flag must be set after the first call
        # and the warning emitted exactly once.
        # NOTE: use range rather than the Python-2-only xrange so the
        # test also runs under Python 3; on Python 2 the loop behaves
        # identically.
        for i in range(2):
            self.assertFalse(drvr._has_numa_support())
            self.assertTrue(drvr._bad_libvirt_numa_version_warn)
    self.assertEqual(1, mock_warn.call_count)
    # assert the version is logged properly
    self.assertEqual('1.2.9.2', mock_warn.call_args[0][1])
def test_get_guest_config_numa_old_version_qemu(self):
    """QEMU older than the NUMA/hugepage minimum must be rejected."""
    self.flags(virt_type='kvm', group='libvirt')
    libvirt_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
    qemu_too_old = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1
    self._test_get_guest_config_numa_unsupported(
        libvirt_supported, qemu_too_old, host.HV_DRIVER_QEMU,
        arch.X86_64, exception.NUMATopologyUnsupported, None)
def test_get_guest_config_numa_other_arch_qemu(self):
    """A PPC64 host arch must be rejected for NUMA guests even with
    new enough libvirt and QEMU versions."""
    self.flags(virt_type='kvm', group='libvirt')
    libvirt_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
    qemu_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    self._test_get_guest_config_numa_unsupported(
        libvirt_supported, qemu_supported, host.HV_DRIVER_QEMU,
        arch.PPC64, exception.NUMATopologyUnsupported, None)
def test_get_guest_config_numa_xen(self):
    """The Xen hypervisor driver must be rejected for NUMA guests."""
    self.flags(virt_type='xen', group='libvirt')
    libvirt_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
    xen_version = utils.convert_version_to_int((4, 5, 0))
    self._test_get_guest_config_numa_unsupported(
        libvirt_supported, xen_version, 'XEN',
        arch.X86_64, exception.NUMATopologyUnsupported, None)
def test_get_guest_config_numa_old_pages_libvirt(self):
    """libvirt below the hugepage minimum raises
    MemoryPagesUnsupported when the instance requests a pagesize."""
    self.flags(virt_type='kvm', group='libvirt')
    libvirt_too_old = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1
    qemu_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    self._test_get_guest_config_numa_unsupported(
        libvirt_too_old, qemu_supported, host.HV_DRIVER_QEMU,
        arch.X86_64, exception.MemoryPagesUnsupported, 2048)
def test_get_guest_config_numa_old_pages_qemu(self):
    """QEMU below the hugepage minimum must be rejected when the
    instance requests a pagesize."""
    self.flags(virt_type='kvm', group='libvirt')
    libvirt_supported = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION)
    qemu_too_old = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1
    self._test_get_guest_config_numa_unsupported(
        libvirt_supported, qemu_too_old, host.HV_DRIVER_QEMU,
        arch.X86_64, exception.NUMATopologyUnsupported, 2048)
def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
    """The instance fits once vcpu_pin_set is taken into account, so
    the guest is pinned to the pin set as a whole with no per-vcpu
    pinning and no guest NUMA config.
    """
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with test.nested(
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(range(8)))
            ) as (has_min_version_mock, get_host_cap_mock,
                  get_vcpu_pin_set_mock, get_online_cpus_mock):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        # NOTE(ndipanov): we make sure that pin_set was taken into account
        # when choosing viable cells
        self.assertEqual(set([2, 3]), cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        self.assertIsNone(cfg.cpu.numa)
def test_get_guest_config_non_numa_host_instance_topo(self):
    """An instance NUMA topology on a host that reports no NUMA
    topology still yields guest CPU NUMA cells, but no numatune
    element and no pinning.
    """
    instance_topology = objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024),
            objects.InstanceNUMACell(
                id=1, cpuset=set([2]), memory=1024)])
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    # The host reports no NUMA topology at all.
    caps.host.topology = None

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with test.nested(
            mock.patch.object(
                objects.InstanceNUMATopology, "get_by_instance_uuid",
                return_value=instance_topology),
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps)):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertIsNone(cfg.cpuset)
        self.assertEqual(0, len(cfg.cputune.vcpupin))
        # Without host NUMA info there is nothing to build numatune
        # placement from.
        self.assertIsNone(cfg.numatune)
        self.assertIsNotNone(cfg.cpu.numa)
        # Guest cells mirror the instance cells; memory is converted
        # from MiB to KiB.
        for instance_cell, numa_cfg_cell in zip(
                instance_topology.cells, cfg.cpu.numa.cells):
            self.assertEqual(instance_cell.id, numa_cfg_cell.id)
            self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
            self.assertEqual(instance_cell.memory * units.Ki,
                             numa_cfg_cell.memory)
def test_get_guest_config_numa_host_instance_topo(self):
    """A two-cell instance NUMA topology on a NUMA host produces
    per-vcpu pinning limited to the vcpu_pin_set, an emulatorpin over
    the union of allowed pCPUs, guest CPU NUMA cells and a strict
    numatune mapping.
    """
    instance_topology = objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
            objects.InstanceNUMACell(
                id=2, cpuset=set([2, 3]), memory=1024,
                pagesize=None)])
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with test.nested(
            mock.patch.object(
                objects.InstanceNUMATopology, "get_by_instance_uuid",
                return_value=instance_topology),
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set',
                return_value=set([2, 3, 4, 5])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(range(8))),
            ):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertIsNone(cfg.cpuset)
        # Test that the pinning is correct and limited to allowed only
        self.assertEqual(0, cfg.cputune.vcpupin[0].id)
        self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
        self.assertEqual(1, cfg.cputune.vcpupin[1].id)
        self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
        self.assertEqual(2, cfg.cputune.vcpupin[2].id)
        self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
        self.assertEqual(3, cfg.cputune.vcpupin[3].id)
        self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
        self.assertIsNotNone(cfg.cpu.numa)
        # Emulator thread may float over all allowed pCPUs.
        self.assertIsInstance(cfg.cputune.emulatorpin,
                              vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
        self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
        # Guest cells are renumbered sequentially; memory MiB -> KiB.
        for instance_cell, numa_cfg_cell, index in zip(
                instance_topology.cells,
                cfg.cpu.numa.cells,
                range(len(instance_topology.cells))):
            self.assertEqual(index, numa_cfg_cell.id)
            self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
            self.assertEqual(instance_cell.memory * units.Ki,
                             numa_cfg_cell.memory)
        allnodes = [cell.id for cell in instance_topology.cells]
        self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
        self.assertEqual("strict", cfg.numatune.memory.mode)
        # Each guest cell maps strictly onto its instance cell's node.
        for instance_cell, memnode, index in zip(
                instance_topology.cells,
                cfg.numatune.memnodes,
                range(len(instance_topology.cells))):
            self.assertEqual(index, memnode.cellid)
            self.assertEqual([instance_cell.id], memnode.nodeset)
            self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_reordered(self):
    """Instance cells with out-of-order ids (3 then 0) are still
    numbered sequentially in the guest config, with pinning following
    the host cells actually used.
    """
    instance_topology = objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            id=3, cpuset=set([0, 1]), memory=1024),
            objects.InstanceNUMACell(
                id=0, cpuset=set([2, 3]), memory=1024)])
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with test.nested(
            mock.patch.object(
                objects.InstanceNUMATopology, "get_by_instance_uuid",
                return_value=instance_topology),
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(range(8))),
            ):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertIsNone(cfg.cpuset)
        # Test that the pinning is correct and limited to allowed only
        self.assertEqual(0, cfg.cputune.vcpupin[0].id)
        self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
        self.assertEqual(1, cfg.cputune.vcpupin[1].id)
        self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
        self.assertEqual(2, cfg.cputune.vcpupin[2].id)
        self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
        self.assertEqual(3, cfg.cputune.vcpupin[3].id)
        self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
        self.assertIsNotNone(cfg.cpu.numa)
        # Emulator may run on the union of all pinned pCPUs.
        self.assertIsInstance(cfg.cputune.emulatorpin,
                              vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
        self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)
        # Guest cell ids are sequential regardless of instance cell ids.
        for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                instance_topology.cells,
                cfg.cpu.numa.cells)):
            self.assertEqual(index, numa_cfg_cell.id)
            self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
            self.assertEqual(instance_cell.memory * units.Ki,
                             numa_cfg_cell.memory)
            self.assertIsNone(numa_cfg_cell.memAccess)
        # Compare as sets since the memory nodeset order follows the
        # (reordered) instance cells.
        allnodes = set([cell.id for cell in instance_topology.cells])
        self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
        self.assertEqual("strict", cfg.numatune.memory.mode)
        for index, (instance_cell, memnode) in enumerate(zip(
                instance_topology.cells,
                cfg.numatune.memnodes)):
            self.assertEqual(index, memnode.cellid)
            self.assertEqual([instance_cell.id], memnode.nodeset)
            self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
    """Explicit cpu_pinning in the instance cells is translated into
    exact one-to-one vcpu pinning, with the emulator pinned to the
    union of all pinned pCPUs.
    """
    instance_topology = objects.InstanceNUMATopology(
        cells=[objects.InstanceNUMACell(
            id=1, cpuset=set([0, 1]), memory=1024,
            cpu_pinning={0: 24, 1: 25}),
            objects.InstanceNUMACell(
                id=0, cpuset=set([2, 3]), memory=1024,
                cpu_pinning={2: 0, 3: 1})])
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    # Larger fake topology, presumably so that the pinned pCPU ids
    # 24/25 exist on the host -- see _fake_caps_numa_topology.
    caps.host.topology = self._fake_caps_numa_topology(
        sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with test.nested(
            mock.patch.object(
                objects.InstanceNUMATopology, "get_by_instance_uuid",
                return_value=instance_topology),
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(range(8))),
            ):
        cfg = conn._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertIsNone(cfg.cpuset)
        # Test that the pinning is correct and limited to allowed only
        self.assertEqual(0, cfg.cputune.vcpupin[0].id)
        self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
        self.assertEqual(1, cfg.cputune.vcpupin[1].id)
        self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
        self.assertEqual(2, cfg.cputune.vcpupin[2].id)
        self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
        self.assertEqual(3, cfg.cputune.vcpupin[3].id)
        self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
        self.assertIsNotNone(cfg.cpu.numa)
        # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
        self.assertIsInstance(cfg.cputune.emulatorpin,
                              vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
        self.assertEqual(set([0, 1, 24, 25]),
                         cfg.cputune.emulatorpin.cpuset)
        # Guest cells are numbered sequentially; memory MiB -> KiB.
        for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
                instance_topology.cells, cfg.cpu.numa.cells)):
            self.assertEqual(i, numa_cfg_cell.id)
            self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
            self.assertEqual(instance_cell.memory * units.Ki,
                             numa_cfg_cell.memory)
            self.assertIsNone(numa_cfg_cell.memAccess)
        allnodes = set([cell.id for cell in instance_topology.cells])
        self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
        self.assertEqual("strict", cfg.numatune.memory.mode)
        for i, (instance_cell, memnode) in enumerate(zip(
                instance_topology.cells, cfg.numatune.memnodes)):
            self.assertEqual(i, memnode.cellid)
            self.assertEqual([instance_cell.id], memnode.nodeset)
            self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_mempages_shared(self):
    """Instance cells with an explicit pagesize yield guest NUMA cells
    with memAccess="shared" plus a strict numatune mapping.
    """
    instance_topology = objects.InstanceNUMATopology(
        cells=[
            objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]),
                memory=1024, pagesize=2048),
            objects.InstanceNUMACell(
                id=2, cpuset=set([2, 3]),
                memory=1024, pagesize=2048)])
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.numa_topology = instance_topology
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                            ephemeral_gb=8128, swap=33550336, name='fake',
                            extra_specs={})
    instance_ref.flavor = flavor

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    with test.nested(
            mock.patch.object(
                objects.InstanceNUMATopology, "get_by_instance_uuid",
                return_value=instance_topology),
            mock.patch.object(host.Host, 'has_min_version',
                              return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set',
                return_value=set([2, 3, 4, 5])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set(range(8))),
            ):
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        for instance_cell, numa_cfg_cell, index in zip(
                instance_topology.cells,
                cfg.cpu.numa.cells,
                range(len(instance_topology.cells))):
            self.assertEqual(index, numa_cfg_cell.id)
            self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
            self.assertEqual(instance_cell.memory * units.Ki,
                             numa_cfg_cell.memory)
            # Cells with an explicit pagesize are expected to report
            # shared memory access.
            self.assertEqual("shared", numa_cfg_cell.memAccess)
        allnodes = [cell.id for cell in instance_topology.cells]
        self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
        self.assertEqual("strict", cfg.numatune.memory.mode)
        for instance_cell, memnode, index in zip(
                instance_topology.cells,
                cfg.numatune.memnodes,
                range(len(instance_topology.cells))):
            self.assertEqual(index, memnode.cellid)
            self.assertEqual([instance_cell.id], memnode.nodeset)
            self.assertEqual("strict", memnode.mode)
def test_get_cpu_numa_config_from_instance(self):
    """Instance NUMA cells map one-to-one onto guest CPU NUMA cells,
    with memory converted from MiB to KiB and shared memAccess."""
    topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
        objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
    ])
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    conf = drvr._get_cpu_numa_config_from_instance(topology, True)
    self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
    expected_cells = [(0, set([1, 2])), (1, set([3, 4]))]
    for cell, (cell_id, cpus) in zip(conf.cells, expected_cells):
        self.assertEqual(cell_id, cell.id)
        self.assertEqual(cpus, cell.cpus)
        # 128 MiB expressed in KiB
        self.assertEqual(131072, cell.memory)
        self.assertEqual("shared", cell.memAccess)
def test_get_cpu_numa_config_from_instance_none(self):
    """No instance NUMA topology means no guest CPU NUMA config."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertIsNone(
        drvr._get_cpu_numa_config_from_instance(None, False))
@mock.patch.object(host.Host, 'has_version', return_value=True)
def test_has_cpu_policy_support(self, mock_has_version):
    """CPU pinning support must be refused when has_version matches a
    known-bad libvirt version."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertRaises(exception.CPUPinningNotSupported,
                      drvr._has_cpu_policy_support)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                   return_value=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                   return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_not_want_hugepages(self, mock_caps, mock_numa, mock_hp):
    """A 4K (small) pagesize never counts as a hugepage request, for
    any combination of host/instance topology arguments."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    small_pages = 4
    instance_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=1, cpuset=set([0, 1]),
                                 memory=1024, pagesize=small_pages),
        objects.InstanceNUMACell(id=2, cpuset=set([2, 3]),
                                 memory=1024, pagesize=small_pages)])

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology()
    mock_caps.return_value = caps

    host_topology = drvr._get_host_numa_topology()

    # Every combination of missing/present topologies must be False.
    for host_topo, inst_topo in [(None, None),
                                 (host_topology, None),
                                 (None, instance_topology),
                                 (host_topology, instance_topology)]:
        self.assertFalse(drvr._wants_hugepages(host_topo, inst_topo))
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                   return_value=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                   return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_want_hugepages(self, mock_caps, mock_numa, mock_hp):
    """A 2048K pagesize on every instance cell is recognised as a
    hugepage request against a NUMA-capable host."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    large_pages = 2048
    instance_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=1, cpuset=set([0, 1]),
                                 memory=1024, pagesize=large_pages),
        objects.InstanceNUMACell(id=2, cpuset=set([2, 3]),
                                 memory=1024, pagesize=large_pages)])

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = "x86_64"
    caps.host.topology = self._fake_caps_numa_topology()
    mock_caps.return_value = caps

    host_topology = drvr._get_host_numa_topology()
    self.assertTrue(
        drvr._wants_hugepages(host_topology, instance_topology))
def test_get_guest_config_clock(self):
    """KVM guests get a UTC clock with delay-pit and catchup-rtc
    timers; a disabled hpet timer is added only on the architectures
    mapped to True in hpet_map below.
    """
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    # guest arch -> whether an hpet timer element is expected
    hpet_map = {
        arch.X86_64: True,
        arch.I686: True,
        arch.PPC: False,
        arch.PPC64: False,
        arch.ARMV7: False,
        arch.AARCH64: False,
        }
    for guestarch, expect_hpet in hpet_map.items():
        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'get_arch',
                               return_value=guestarch):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta,
                                         disk_info)
            self.assertIsInstance(cfg.clock,
                                  vconfig.LibvirtConfigGuestClock)
            self.assertEqual(cfg.clock.offset, "utc")
            self.assertIsInstance(cfg.clock.timers[0],
                                  vconfig.LibvirtConfigGuestTimer)
            self.assertIsInstance(cfg.clock.timers[1],
                                  vconfig.LibvirtConfigGuestTimer)
            self.assertEqual(cfg.clock.timers[0].name, "pit")
            self.assertEqual(cfg.clock.timers[0].tickpolicy,
                             "delay")
            self.assertEqual(cfg.clock.timers[1].name, "rtc")
            self.assertEqual(cfg.clock.timers[1].tickpolicy,
                             "catchup")
            if expect_hpet:
                # hpet is present as an element but explicitly disabled
                self.assertEqual(3, len(cfg.clock.timers))
                self.assertIsInstance(cfg.clock.timers[2],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertEqual('hpet', cfg.clock.timers[2].name)
                self.assertFalse(cfg.clock.timers[2].present)
            else:
                self.assertEqual(2, len(cfg.clock.timers))
@mock.patch.object(libvirt_utils, 'get_arch')
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows(self, mock_version, mock_get_arch):
    """Without the minimum version for hypervclock, a Windows guest
    gets a localtime clock with three timers: pit, rtc and a disabled
    hpet."""
    mock_version.return_value = False
    mock_get_arch.return_value = arch.I686
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref['os_type'] = 'windows'
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)

    self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock)
    self.assertEqual(cfg.clock.offset, "localtime")

    self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
    timer_names = [timer.name for timer in cfg.clock.timers]
    self.assertEqual(["pit", "rtc", "hpet"], timer_names)
    self.assertFalse(cfg.clock.timers[2].present)
    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_timer(self, mock_version, mock_get_arch):
        """Windows/x86 guests additionally get an enabled hypervclock timer
        and the HyperV guest feature on a new-enough libvirt.
        """
        # has_min_version -> True: hypervclock timer expected below.
        mock_version.return_value = True
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        # Four timers: pit, rtc, disabled hpet, enabled hypervclock.
        self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)
        self.assertEqual("hypervclock", cfg.clock.timers[3].name)
        self.assertTrue(cfg.clock.timers[3].present)
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature1(self, mock_version):
        """Only the HyperV relaxed-timing enlightenment is enabled when the
        version check passes solely for lv_ver (1, 0, 0) / hv_ver (1, 1, 0).
        """
        def fake_version(lv_ver=None, hv_ver=None, hv_type=None):
            # Succeed only for the base relaxed-timing version probe.
            if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0):
                return True
            return False
        mock_version.side_effect = fake_version
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
        # relaxed on; spinlocks/vapic need newer versions, so off.
        self.assertTrue(cfg.features[2].relaxed)
        self.assertFalse(cfg.features[2].spinlocks)
        self.assertFalse(cfg.features[2].vapic)
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
        """All HyperV enlightenments (relaxed, spinlocks, vapic) are enabled
        when every version check passes.
        """
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
        self.assertTrue(cfg.features[2].relaxed)
        self.assertTrue(cfg.features[2].spinlocks)
        self.assertEqual(8191, cfg.features[2].spinlock_retries)
        self.assertTrue(cfg.features[2].vapic)
    def test_get_guest_config_with_two_nics(self):
        """Two NICs in the network info yield two interface devices in the
        guest config, in addition to the standard disk/serial/input/
        graphics/video/balloon devices.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 2),
                                     image_meta, disk_info)
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        # One interface device per NIC requested above.
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_bug_1118829(self):
self.flags(virt_type='uml', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
disk_info = {'disk_bus': 'virtio',
'cdrom_bus': 'ide',
'mapping': {u'vda': {'bus': 'virtio',
'type': 'disk',
'dev': u'vda'},
'root': {'bus': 'virtio',
'type': 'disk',
'dev': 'vda'}}}
# NOTE(jdg): For this specific test leave this blank
# This will exercise the failed code path still,
# and won't require fakes and stubs of the iscsi discovery
block_device_info = {}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
drvr._get_guest_config(instance_ref, [], image_meta, disk_info,
None, block_device_info)
self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
    def test_get_guest_config_with_root_device_name(self):
        """A root_device_name in block_device_info becomes the uml guest's
        os_root, and the minimal uml config has no features and only
        disk/disk/console devices.
        """
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        block_device_info = {'root_device_name': '/dev/vdb'}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info,
                                     None, block_device_info)
        self.assertEqual(0, len(cfg.features))
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, "uml")
        self.assertEqual(cfg.os_boot_dev, [])
        self.assertEqual(cfg.os_root, '/dev/vdb')
        self.assertEqual(len(cfg.devices), 3)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
    def test_get_guest_config_with_block_device(self):
        """Volume BDMs show up as guest disks with their requested target
        device names, and the BDMs are saved during config generation.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdc'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdd'}),
                ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        # Patch save so the fake BDMs don't try to hit the database.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info,
                                         None, info)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, 'vdc')
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[3].target_dev, 'vdd')
        mock_save.assert_called_with()
    def test_get_guest_config_lxc_with_attached_volume(self):
        """For LXC, attached (non-boot) volumes appear as guest disks at
        their mount_device targets; the boot volume (boot_index 0) does not
        get its own disk device here.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                  fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'boot_index': 0}),
                  fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
                  fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 3,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
               ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        info['block_device_mapping'][2]['connection_info'] = conn_info
        info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
        info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
        info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
        # Patch save so the fake BDMs don't try to hit the database.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                info)
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info,
                                         None, info)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[1].target_dev, 'vdc')
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, 'vdd')
        mock_save.assert_called_with()
    def test_get_guest_config_with_configdrive(self):
        """The config drive is attached as the last available disk device:
        hdd on architectures with ide, sdz on power (scsi-only).
        """
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        # make configdrive.required_by() return True
        instance_ref['config_drive'] = True
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        # The last device is selected for this. on x86 is the last ide
        # device (hdd). Since power only support scsi, the last device
        # is sdz
        expect = {"ppc": "sdz", "ppc64": "sdz"}
        disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, disk)
    def test_get_guest_config_with_virtio_scsi_bus(self):
        """The hw_scsi_model=virtio-scsi image property adds a virtio-scsi
        controller device after the disks.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            [])
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestController)
        self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
    def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
        """Volume BDMs with disk_bus=scsi attach on the scsi bus at their
        requested targets, followed by a virtio-scsi controller.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        conn_info = {'driver_volume_type': 'fake'}
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
                ])}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        # Patch save so the fake BDMs don't try to hit the database.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()
    def test_get_guest_config_with_vnc(self):
        """VNC enabled without the usb tablet yields a vnc graphics device
        and no input device.
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False, group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].type, "vnc")
    def test_get_guest_config_with_vnc_and_tablet(self):
        """VNC enabled with use_usb_tablet adds a tablet input device ahead
        of the vnc graphics device.
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=False, group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
    def test_get_guest_config_with_spice_and_tablet(self):
        """SPICE without the agent plus use_usb_tablet yields a tablet input
        device and a spice graphics device.
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=False,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "spice")
    def test_get_guest_config_with_spice_and_agent(self):
        """SPICE with the agent enabled replaces the tablet with a spicevmc
        channel and uses the qxl video model.
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
    @mock.patch('nova.console.serial.acquire_port')
    @mock.patch('nova.virt.hardware.get_number_of_serial_ports',
                return_value=1)
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
    def test_create_serial_console_devices_based_on_arch(self, mock_get_arch,
                                                         mock_get_port_number,
                                                         mock_acquire_port):
        """Serial console devices are arch-dependent: x86_64 gets a <serial>
        device while s390/s390x get a <console> device, always tcp-backed.
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial,
                    arch.S390: vconfig.LibvirtConfigGuestConsole,
                    arch.S390X: vconfig.LibvirtConfigGuestConsole}
        for guest_arch, device_type in expected.items():
            mock_get_arch.return_value = guest_arch
            guest = vconfig.LibvirtConfigGuest()
            drvr._create_serial_console_devices(guest, instance=None,
                                                flavor={}, image_meta={})
            # One port requested (mocked above) -> exactly one device.
            self.assertEqual(1, len(guest.devices))
            console_device = guest.devices[0]
            self.assertIsInstance(console_device, device_type)
            self.assertEqual("tcp", console_device.type)
    @mock.patch('nova.console.serial.acquire_port')
    def test_get_guest_config_serial_console(self, acquire_port):
        """With serial consoles enabled the first serial device is tcp-backed
        and listens on the port handed out by acquire_port.
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        acquire_port.return_value = 11111
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(8, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual(11111, cfg.devices[2].listen_port)
    def test_get_guest_config_serial_console_through_flavor(self):
        """The hw:serial_port_count flavor extra spec controls how many tcp
        serial devices the guest config gets.
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(10, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)
def test_get_guest_config_serial_console_invalid_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(
exception.ImageSerialPortNumberInvalid,
drvr._get_guest_config, instance_ref, [],
image_meta, disk_info)
    def test_get_guest_config_serial_console_image_and_flavor(self):
        """Serial port count resolution when both the image property
        hw_serial_port_count and the flavor extra spec are set.
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_serial_port_count": "3"}})
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                     disk_info)
        self.assertEqual(10, len(cfg.devices), cfg.devices)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console_through_port_rng_exhausted(
self, acquire_port):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
'127.0.0.1')
self.assertRaises(
exception.SocketPortRangeExhaustedException,
drvr._get_guest_config, instance_ref, [],
image_meta, disk_info)
@mock.patch('os.path.getsize', return_value=0) # size doesn't matter
@mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size',
return_value='fake-size')
def test_detach_encrypted_volumes(self, mock_getsize,
mock_get_volume_size):
"""Test that unencrypted volumes are not disconnected with dmcrypt."""
instance = objects.Instance(**self.test_instance)
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<driver name='fake-driver' type='fake-type' />
<source file='filename'/>
<target dev='vdc' bus='virtio'/>
</disk>
<disk type='block' device='disk'>
<driver name='fake-driver' type='fake-type' />
<source dev='/dev/mapper/disk'/>
<target dev='vda'/>
</disk>
<disk type='block' device='disk'>
<driver name='fake-driver' type='fake-type' />
<source dev='/dev/mapper/swap'/>
<target dev='vdb'/>
</disk>
</devices>
</domain>
"""
dom = FakeVirtDomain(fake_xml=xml)
instance.ephemeral_key_uuid = 'fake-id' # encrypted
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@mock.patch.object(dmcrypt, 'delete_volume')
@mock.patch.object(conn._host, 'get_domain', return_value=dom)
def detach_encrypted_volumes(block_device_info, mock_get_domain,
mock_delete_volume):
conn._detach_encrypted_volumes(instance, block_device_info)
mock_get_domain.assert_called_once_with(instance)
self.assertFalse(mock_delete_volume.called)
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
'block_device_mapping': []}
detach_encrypted_volumes(block_device_info)
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest(None,
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest('bind',
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 101),
('127.0.0.2', 100)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_connect_only(self,
mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest('connect',
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest(None,
mock_get_xml_desc,
'console')
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
dev_name='serial'):
xml = """
<domain type='kvm'>
<devices>
<%(dev_name)s type="tcp">
<source host="127.0.0.1" service="100" mode="connect"/>
</%(dev_name)s>
<%(dev_name)s type="tcp">
<source host="127.0.0.1" service="101" mode="bind"/>
</%(dev_name)s>
<%(dev_name)s type="tcp">
<source host="127.0.0.2" service="100" mode="bind"/>
</%(dev_name)s>
<%(dev_name)s type="tcp">
<source host="127.0.0.2" service="101" mode="connect"/>
</%(dev_name)s>
</devices>
</domain>""" % {'dev_name': dev_name}
mock_get_xml_desc.return_value = xml
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
guest = libvirt_guest.Guest(FakeVirtDomain())
return drvr._get_serial_ports_from_guest(guest, mode=mode)
    def test_get_guest_config_with_type_xen(self):
        """Xen guests get a console device (no serial), vnc graphics and
        the xen video model.
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 6)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[3].type, "vnc")
        self.assertEqual(cfg.devices[4].type, "xen")
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
                       return_value=arch.S390X)
    def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
        """KVM on s390x gets an sclplm file-backed log console, an sclp pty
        terminal console and the s390-ccw-virtio machine type.
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self._stub_host_capabilities_cpu_arch(arch.S390X)
        instance_ref = objects.Instance(**self.test_instance)
        cfg = self._get_guest_config_via_fake_api(instance_ref)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        log_file_device = cfg.devices[2]
        self.assertIsInstance(log_file_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclplm", log_file_device.target_type)
        self.assertEqual("file", log_file_device.type)
        terminal_device = cfg.devices[3]
        self.assertIsInstance(terminal_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclp", terminal_device.target_type)
        self.assertEqual("pty", terminal_device.type)
        self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = cpu_arch
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
def _get_guest_config_via_fake_api(self, instance):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
return drvr._get_guest_config(instance, [],
image_meta, disk_info)
    def test_get_guest_config_with_type_xen_pae_hvm(self):
        """Xen HVM guests use the configured hvmloader and enable the PAE,
        ACPI and APIC guest features.
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        # Force full virtualization mode for this xen guest.
        instance_ref['vm_mode'] = vm_mode.HVM
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeaturePAE)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
def test_get_guest_config_with_type_xen_pae_pvm(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(cfg.os_type, vm_mode.XEN)
self.assertEqual(1, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[6].type, "vnc")
self.assertEqual(cfg.devices[7].type, "spice")
def test_get_guest_config_with_watchdog_action_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_watchdog_action": "none"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
agent_enabled=False):
self.flags(enabled=vnc_enabled, group='vnc')
self.flags(enabled=spice_enabled,
agent_enabled=agent_enabled, group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
return drvr._get_guest_usb_tablet(os_type)
def test_get_guest_usb_tablet_wipe(self):
self.flags(use_usb_tablet=True, group='libvirt')
tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM)
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(True, True, "foo")
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, True, vm_mode.HVM, True)
self.assertIsNone(tablet)
def _test_get_guest_config_with_watchdog_action_flavor(self,
hw_watchdog_action="hw:watchdog_action"):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
def test_get_guest_config_with_watchdog_action_through_flavor(self):
self._test_get_guest_config_with_watchdog_action_flavor()
# TODO(pkholkin): the test accepting old property name 'hw_watchdog_action'
# should be removed in the next release
def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
self):
self._test_get_guest_config_with_watchdog_action_flavor(
hw_watchdog_action="hw_watchdog_action")
def test_get_guest_config_with_watchdog_overrides_flavor(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_watchdog_action": "pause"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("pause", cfg.devices[7].action)
def test_get_guest_config_with_video_driver_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "vmvga"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[6].type, "vmvga")
def test_get_guest_config_with_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_qemu_guest_agent": "yes"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[7].type, "unix")
self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
def test_get_guest_config_with_video_driver_vram(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "spice")
self.assertEqual(cfg.devices[6].type, "qxl")
self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)
@mock.patch('nova.virt.disk.api.teardown_container')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_unmount_fs_if_error_during_lxc_create_domain(self,
mock_get_inst_path, mock_ensure_tree, mock_setup_container,
mock_get_info, mock_teardown):
"""If we hit an error during a `_create_domain` call to `libvirt+lxc`
we need to ensure the guest FS is unmounted from the host so that any
future `lvremove` calls will work.
"""
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_instance = mock.MagicMock()
mock_get_inst_path.return_value = '/tmp/'
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_image = mock.MagicMock()
mock_image.path = '/tmp/test.img'
drvr.image_backend.image.return_value = mock_image
mock_setup_container.return_value = '/dev/nbd0'
mock_get_info.side_effect = exception.InstanceNotFound(
instance_id='foo')
drvr._conn.defineXML = mock.Mock()
drvr._conn.defineXML.side_effect = ValueError('somethingbad')
with test.nested(
mock.patch.object(drvr, '_is_booted_from_volume',
return_value=False),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr, 'firewall_driver'),
mock.patch.object(drvr, 'cleanup')):
self.assertRaises(ValueError,
drvr._create_domain_and_network,
self.context,
'xml',
mock_instance, None, None)
mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
def test_video_driver_flavor_limit_not_set(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_video_driver_ram_above_flavor_limit(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
instance_ref = objects.Instance(**self.test_instance)
instance_type = instance_ref.get_flavor()
instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_get_guest_config_without_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_qemu_guest_agent": "no"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_rng_device(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertIsNone(cfg.devices[6].backend)
self.assertIsNone(cfg.devices[6].rate_bytes)
self.assertIsNone(cfg.devices[6].rate_period)
def test_get_guest_config_with_rng_not_allowed(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 7)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_with_rng_limits(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
'hw_rng:rate_bytes': '1024',
'hw_rng:rate_period': '2'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertIsNone(cfg.devices[6].backend)
self.assertEqual(cfg.devices[6].rate_bytes, 1024)
self.assertEqual(cfg.devices[6].rate_period, 2)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_backend(self, mock_path):
self.flags(virt_type='kvm',
use_usb_tablet=False,
rng_dev_path='/dev/hw_rng',
group='libvirt')
mock_path.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
self.assertIsNone(cfg.devices[6].rate_bytes)
self.assertIsNone(cfg.devices[6].rate_period)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
self.flags(virt_type='kvm',
use_usb_tablet=False,
rng_dev_path='/dev/hw_rng',
group='libvirt')
mock_path.return_value = False
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(exception.RngDeviceNotExist,
drvr._get_guest_config,
instance_ref,
[],
image_meta, disk_info)
def test_guest_cpu_shares_with_multi_vcpu(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.vcpus = 4
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(4096, cfg.cputune.shares)
def test_get_guest_config_with_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
'quota:cpu_period': '20000'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(10000, cfg.cputune.shares)
self.assertEqual(20000, cfg.cputune.period)
def test_get_guest_config_with_bogus_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
'quota:cpu_period': '20000'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(ValueError,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
def _test_get_guest_config_sysinfo_serial(self, expected_serial):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
cfg = drvr._get_guest_config_sysinfo(instance_ref)
self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
self.assertEqual(version.vendor_string(),
cfg.system_manufacturer)
self.assertEqual(version.product_string(),
cfg.system_product)
self.assertEqual(version.version_string_with_package(),
cfg.system_version)
self.assertEqual(expected_serial,
cfg.system_serial)
self.assertEqual(instance_ref['uuid'],
cfg.system_uuid)
self.assertEqual("Virtual Machine",
cfg.system_family)
def test_get_guest_config_sysinfo_serial_none(self):
self.flags(sysinfo_serial="none", group="libvirt")
self._test_get_guest_config_sysinfo_serial(None)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_host_sysinfo_serial_hardware")
def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
self.flags(sysinfo_serial="hardware", group="libvirt")
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
mock_uuid.return_value = theuuid
self._test_get_guest_config_sysinfo_serial(theuuid)
@contextlib.contextmanager
def patch_exists(self, result):
real_exists = os.path.exists
def fake_exists(filename):
if filename == "/etc/machine-id":
return result
return real_exists(filename)
with mock.patch.object(os.path, "exists") as mock_exists:
mock_exists.side_effect = fake_exists
yield mock_exists
def test_get_guest_config_sysinfo_serial_os(self):
self.flags(sysinfo_serial="os", group="libvirt")
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
with test.nested(
mock.patch('__builtin__.open',
mock.mock_open(read_data=theuuid)),
self.patch_exists(True)):
self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self):
self.flags(sysinfo_serial="os", group="libvirt")
with test.nested(
mock.patch('__builtin__.open', mock.mock_open(read_data="")),
self.patch_exists(True)):
self.assertRaises(exception.NovaException,
self._test_get_guest_config_sysinfo_serial,
None)
def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self):
self.flags(sysinfo_serial="os", group="libvirt")
with self.patch_exists(False):
self.assertRaises(exception.NovaException,
self._test_get_guest_config_sysinfo_serial,
None)
def test_get_guest_config_sysinfo_serial_auto_hardware(self):
self.flags(sysinfo_serial="auto", group="libvirt")
real_exists = os.path.exists
with test.nested(
mock.patch.object(os.path, "exists"),
mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_host_sysinfo_serial_hardware")
) as (mock_exists, mock_uuid):
def fake_exists(filename):
if filename == "/etc/machine-id":
return False
return real_exists(filename)
mock_exists.side_effect = fake_exists
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
mock_uuid.return_value = theuuid
self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_auto_os(self):
self.flags(sysinfo_serial="auto", group="libvirt")
real_exists = os.path.exists
real_open = builtins.open
with test.nested(
mock.patch.object(os.path, "exists"),
mock.patch.object(builtins, "open"),
) as (mock_exists, mock_open):
def fake_exists(filename):
if filename == "/etc/machine-id":
return True
return real_exists(filename)
mock_exists.side_effect = fake_exists
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
def fake_open(filename, *args, **kwargs):
if filename == "/etc/machine-id":
h = mock.MagicMock()
h.read.return_value = theuuid
h.__enter__.return_value = h
return h
return real_open(filename, *args, **kwargs)
mock_open.side_effect = fake_open
self._test_get_guest_config_sysinfo_serial(theuuid)
def _create_fake_service_compute(self):
service_info = {
'id': 1729,
'host': 'fake',
'report_count': 0
}
service_ref = objects.Service(**service_info)
compute_info = {
'id': 1729,
'vcpus': 2,
'memory_mb': 1024,
'local_gb': 2048,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'free_ram_mb': 1024,
'free_disk_gb': 2048,
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'running_vms': 0,
'cpu_info': '',
'current_workload': 0,
'service_id': service_ref['id'],
'host': service_ref['host']
}
compute_ref = objects.ComputeNode(**compute_info)
return (service_ref, compute_ref)
def test_get_guest_config_with_pci_passthrough_kvm(self):
self.flags(virt_type='kvm', group='libvirt')
service_ref, compute_ref = self._create_fake_service_compute()
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.ALLOCATED,
address='0000:00:00.1',
compute_id=compute_ref['id'],
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
cfg = drvr._get_guest_config(instance, [],
image_meta, disk_info)
had_pci = 0
# care only about the PCI devices
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'yes')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "1")
self.assertEqual(had_pci, 1)
def test_get_guest_config_with_pci_passthrough_xen(self):
self.flags(virt_type='xen', group='libvirt')
service_ref, compute_ref = self._create_fake_service_compute()
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.ALLOCATED,
address='0000:00:00.2',
compute_id=compute_ref['id'],
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
cfg = drvr._get_guest_config(instance, [],
image_meta, disk_info)
had_pci = 0
# care only about the PCI devices
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'no')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "2")
self.assertEqual(had_pci, 1)
def test_get_guest_config_os_command_line_through_image_meta(self):
self.flags(virt_type="kvm",
cpu_mode=None,
group='libvirt')
self.test_instance['kernel_id'] = "fake_kernel_id"
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line":
"fake_os_command_line"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
def test_get_guest_config_os_command_line_without_kernel_id(self):
self.flags(virt_type="kvm",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line":
"fake_os_command_line"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsNone(cfg.os_cmdline)
def test_get_guest_config_os_command_empty(self):
self.flags(virt_type="kvm",
cpu_mode=None,
group='libvirt')
self.test_instance['kernel_id'] = "fake_kernel_id"
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"os_command_line": ""}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
# the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
# default, so testing an empty string and None value in the
# os_command_line image property must pass
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertNotEqual(cfg.os_cmdline, "")
def test_get_guest_config_armv7(self):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = arch.ARMV7
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.flags(virt_type="kvm",
group="libvirt")
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "vexpress-a15")
def test_get_guest_config_aarch64(self):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = arch.AARCH64
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.flags(virt_type="kvm",
group="libvirt")
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "virt")
def test_get_guest_config_machine_type_s390(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigGuestCPU()
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
host_cpu_archs = (arch.S390, arch.S390X)
for host_cpu_arch in host_cpu_archs:
caps.host.cpu.arch = host_cpu_arch
os_mach_type = drvr._get_machine_type(image_meta, caps)
self.assertEqual('s390-ccw-virtio', os_mach_type)
def test_get_guest_config_machine_type_through_image_meta(self):
self.flags(virt_type="kvm",
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_machine_type":
"fake_machine_type"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
    def test_get_guest_config_machine_type_from_config(self):
        """hw_machine_type config mapping is applied for a matching arch.

        The fake capabilities advertise an x86_64 host, so the
        'x86_64=fake_machine_type' entry from CONF.libvirt must be used.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(hw_machine_type=['x86_64=fake_machine_type'],
                   group='libvirt')
        def fake_getCapabilities():
            # Minimal capabilities XML reporting an x86_64 host CPU.
            return """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
</cpu>
</host>
</capabilities>
"""
        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
<model fallback='allow'>Penryn</model>
<vendor>Intel</vendor>
<feature policy='require' name='xtpr'/>
</cpu>
"""
        # Make sure the host arch is mocked as x86_64
        self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
                                      baselineCPU=fake_baselineCPU,
                                      getVersion=lambda: 1005001)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def _test_get_guest_config_ppc64(self, device_index):
"""Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
"""
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
expected = (arch.PPC64, arch.PPC)
for guestarch in expected:
with mock.patch.object(libvirt_driver.libvirt_utils,
'get_arch',
return_value=guestarch):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta,
disk_info)
self.assertIsInstance(cfg.devices[device_index],
vconfig.LibvirtConfigGuestVideo)
self.assertEqual(cfg.devices[device_index].type, 'vga')
    def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
        """With VNC enabled, the VGA device is expected at devices[6]."""
        self.flags(enabled=True, group='vnc')
        self._test_get_guest_config_ppc64(6)
    def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
        """With SPICE (+agent) enabled, the VGA device shifts to devices[8]."""
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        self._test_get_guest_config_ppc64(8)
def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
self.flags(virt_type='kvm', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = extra_specs
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, image_meta)
conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertTrue(conf.os_bootmenu)
    def test_get_guest_config_bootmenu_via_image_meta(self):
        """The hw_boot_menu image property enables the boot menu."""
        image_meta = objects.ImageMeta.from_dict(
            {"disk_format": "raw",
             "properties": {"hw_boot_menu": "True"}})
        self._test_get_guest_config_bootmenu(image_meta, {})
    def test_get_guest_config_bootmenu_via_extra_specs(self):
        """The hw:boot_menu flavor extra spec enables the boot menu."""
        image_meta = objects.ImageMeta.from_dict(
            self.test_image_meta)
        self._test_get_guest_config_bootmenu(image_meta,
                                             {'hw:boot_menu': 'True'})
def test_get_guest_cpu_config_none(self):
self.flags(cpu_mode="none", group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertIsNone(conf.cpu.mode)
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_kvm(self):
self.flags(virt_type="kvm",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_uml(self):
self.flags(virt_type="uml",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_default_lxc(self):
self.flags(virt_type="lxc",
cpu_mode=None,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_host_passthrough(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-passthrough", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-passthrough")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_host_model(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-model", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_custom(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="custom",
cpu_model="Penryn",
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "Penryn")
self.assertEqual(conf.cpu.sockets, 1)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_get_guest_cpu_config_numa_topology(self, mock_has_min_version):
        """Building the guest config fills in the NUMA cell's cpu_topology.

        The generated topology must account for exactly the flavor's vcpus.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 2
        instance_ref.numa_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=0,
                cpuset=set([0, 1]),
                memory=1024,
                cpu_pinning={})])
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # The topology must be unset before the call; it is filled in as a
        # side effect of _get_guest_config.
        self.assertIsNone(instance_ref.numa_topology.cells[0].cpu_topology)
        drvr._get_guest_config(instance_ref,
                               _fake_network_info(self.stubs, 1),
                               image_meta, disk_info)
        topo = instance_ref.numa_topology.cells[0].cpu_topology
        self.assertIsNotNone(topo)
        self.assertEqual(topo.cores * topo.sockets * topo.threads,
                         instance_ref.flavor.vcpus)
def test_get_guest_cpu_topology(self):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.vcpus = 8
instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertEqual(conf.cpu.sockets, 4)
self.assertEqual(conf.cpu.cores, 2)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_disable(self):
self.flags(mem_stats_period_seconds=0, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
def test_get_guest_memory_balloon_config_period_value(self):
self.flags(mem_stats_period_seconds=21, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(21, device.period)
def test_get_guest_memory_balloon_config_qemu(self):
self.flags(virt_type='qemu', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_xen(self):
self.flags(virt_type='xen', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('xen', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
    def test_xml_and_uri_no_ramdisk_no_kernel(self):
        """No kernel/ramdisk ids: neither is expected in the guest XML."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
    def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
        """Xen HVM mode without kernel/ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.HVM})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=True)
    def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
        """Xen paravirt mode without kernel/ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.XEN})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=False,
                                xen_only=True)
    def test_xml_and_uri_no_ramdisk(self):
        """Kernel only: the kernel but no ramdisk is expected."""
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=False)
    def test_xml_and_uri_no_kernel(self):
        """A ramdisk without a kernel is ignored (no ramdisk expected)."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
    def test_xml_and_uri(self):
        """Kernel and ramdisk both present: both are expected."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=True)
    def test_xml_and_uri_rescue(self):
        """Rescue config with both kernel and ramdisk present."""
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=True, rescue=instance_data)
    def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
        """Rescue config with neither kernel nor ramdisk."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data, expect_kernel=False,
expect_ramdisk=True, rescue=instance_data)
    def test_xml_and_uri_rescue_no_ramdisk(self):
        """Rescue config with a kernel but no ramdisk."""
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=False, rescue=instance_data)
    def test_xml_uuid(self):
        """Delegate: verify the generated XML carries the instance uuid."""
        self._check_xml_and_uuid(self.test_image_meta)
    def test_lxc_container_and_uri(self):
        """Delegate: verify LXC container XML and connection URI."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)
    def test_xml_disk_prefix(self):
        """Delegate: verify the default disk device prefix."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, None)
    def test_xml_user_specified_disk_prefix(self):
        """Delegate: verify a user-specified 'sd' disk prefix is honoured."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, 'sd')
    def test_xml_disk_driver(self):
        """Delegate: verify the disk driver element in the XML."""
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_driver(instance_data)
    def test_xml_disk_bus_virtio(self):
        """A plain disk image defaults to the virtio bus as vda."""
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        self._check_xml_and_disk_bus(image_meta,
                                     None,
                                     (("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
# It's necessary to check if the architecture is power, because
# power doesn't have support to ide, and so libvirt translate
# all ide calls to scsi
expected = {arch.PPC: ("cdrom", "scsi", "sda"),
arch.PPC64: ("cdrom", "scsi", "sda")}
expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
("cdrom", "ide", "hda"))
image_meta = objects.ImageMeta.from_dict({
"disk_format": "iso"})
self._check_xml_and_disk_bus(image_meta,
None,
(expec_val,))
    def test_xml_disk_bus_ide_and_virtio(self):
        """Mixed buses: ide (scsi on Power) cdrom plus virtio swap/ephemeral."""
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda")}
        swap = {'device_name': '/dev/vdc',
                'swap_size': 1}
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'size': 1}]
        block_device_info = {
                'swap': swap,
                'ephemerals': ephemerals}
        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     block_device_info,
                                     (expec_val,
                                      ("disk", "virtio", "vdb"),
                                      ("disk", "virtio", "vdc")))
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instances(self, mock_list):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm1, vm2, vm3, vm4]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
names = drvr.list_instances()
self.assertEqual(names[0], vm1.name())
self.assertEqual(names[1], vm2.name())
self.assertEqual(names[2], vm3.name())
self.assertEqual(names[3], vm4.name())
mock_list.assert_called_with(only_running=False)
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instance_uuids(self, mock_list):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm1, vm2, vm3, vm4]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
uuids = drvr.list_instance_uuids()
self.assertEqual(len(uuids), 4)
self.assertEqual(uuids[0], vm1.UUIDString())
self.assertEqual(uuids[1], vm2.UUIDString())
self.assertEqual(uuids[2], vm3.UUIDString())
self.assertEqual(uuids[3], vm4.UUIDString())
mock_list.assert_called_with(only_running=False)
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_get_all_block_devices(self, mock_list):
        """_get_all_block_devices collects block-disk source paths.

        File-backed disks are skipped; only <disk type='block'> source
        dev paths across all domains are returned.
        """
        xml = [
            # domain 1: one file disk, one block disk (/path/to/dev/1)
            """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                    </disk>
                </devices>
            </domain>
            """,
            # domain 2: file disk only - contributes nothing
            """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                </devices>
            </domain>
            """,
            # domain 3: one file disk, one block disk (/path/to/dev/3)
            """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/3'/>
                    </disk>
                </devices>
            </domain>
            """,
        ]
        mock_list.return_value = [
            FakeVirtDomain(xml[0], id=3, name="instance00000001"),
            FakeVirtDomain(xml[1], id=1, name="instance00000002"),
            FakeVirtDomain(xml[2], id=5, name="instance00000003")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        devices = drvr._get_all_block_devices()
        self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
        mock_list.assert_called_with()
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.flags(vcpu_pin_set="4-5")
get_online_cpus.return_value = set([4, 5, 6])
expected_vcpus = 2
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_out_of_range(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.flags(vcpu_pin_set="4-6")
get_online_cpus.return_value = set([4, 5])
self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
self.flags(vcpu_pin_set="4-6")
get_online_cpus.side_effect = not_supported_exc
self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
self.flags(vcpu_pin_set="1")
get_online_cpus.side_effect = not_supported_exc
expected_vcpus = 1
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
get_cpu_count.return_value = 2
expected_vcpus = 2
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
get_cpu_count.return_value = 3
expected_vcpus = 3
vcpus = drvr._get_vcpu_total()
self.assertEqual(expected_vcpus, vcpus)
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_quiesce(self, mock_has_min_version):
        """quiesce triggers a guest filesystem freeze via fsFreeze.

        The image declares qemu guest agent support and requires quiesce.
        """
        self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                       "os_require_quiesce": "yes"}}
            self.assertIsNone(drvr.quiesce(self.context, instance, img_meta))
            mock_fsfreeze.assert_called_once_with()
def test_quiesce_not_supported(self):
self.create_fake_libvirt_mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
self.assertRaises(exception.InstanceQuiesceNotSupported,
drvr.quiesce, self.context, instance, None)
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_unquiesce(self, mock_has_min_version):
        """unquiesce thaws guest filesystems via fsThaw."""
        self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                      lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                       "os_require_quiesce": "yes"}}
            self.assertIsNone(drvr.unquiesce(self.context, instance, img_meta))
            mock_fsthaw.assert_called_once_with()
    def test_create_snapshot_metadata(self):
        """Snapshot metadata mirrors instance fields and base image format.

        Second call: with os_type set and an 'ami' base image, the base
        image's disk/container formats must be carried over instead of
        the defaults.
        """
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'raw'})
        instance_data = {'kernel_id': 'kernel',
                         'project_id': 'prj_id',
                         'ramdisk_id': 'ram_id',
                         'os_type': None}
        instance = objects.Instance(**instance_data)
        img_fmt = 'raw'
        snp_name = 'snapshot_name'
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        expected = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                        },
                    'disk_format': img_fmt,
                    'container_format': 'bare',
                    }
        self.assertEqual(ret, expected)
        # simulate an instance with os_type field defined
        # disk format equals to ami
        # container format not equals to bare
        instance['os_type'] = 'linux'
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'ami',
             'container_format': 'test_container'})
        expected['properties']['os_type'] = instance['os_type']
        expected['disk_format'] = base.disk_format
        expected['container_format'] = base.container_format
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        self.assertEqual(ret, expected)
def test_get_volume_driver(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
driver = conn._get_volume_driver(connection_info)
result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
self.assertTrue(result)
def test_get_volume_driver_unknown(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'unknown',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
self.assertRaises(
exception.VolumeDriverNotFound,
conn._get_volume_driver,
connection_info
)
    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver,
                       'connect_volume')
    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
    def test_get_volume_config(self, get_config, connect_volume):
        """_get_volume_config delegates to the volume driver's get_config."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_config = mock.MagicMock()
        get_config.return_value = mock_config
        config = drvr._get_volume_config(connection_info, disk_info)
        # The returned config must be exactly what the driver produced.
        get_config.assert_called_once_with(connection_info, disk_info)
        self.assertEqual(mock_config, config)
    def test_attach_invalid_volume_type(self):
        """attach_volume with an unknown volume type raises."""
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.VolumeDriverNotFound,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "badtype"},
                          instance,
                          "/dev/sda")
    def test_attach_blockio_invalid_hypervisor(self):
        """Custom block sizes are rejected for an unsupported virt_type."""
        self.flags(virt_type='fake_type', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidHypervisorType,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                          },
                          instance,
                          "/dev/sda")
    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    def test_attach_blockio_invalid_version(self, mock_version):
        """Custom block sizes are rejected on a too-old libvirt."""
        # (major * 1000 * 1000) + (minor * 1000) + micro encodes 0.9.8.
        mock_version.return_value = (0 * 1000 * 1000) + (9 * 1000) + 8
        self.flags(virt_type='qemu', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.Invalid,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                          },
                          instance,
                          "/dev/sda")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_attach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain, mock_get_info, get_image):
        """attach_volume on a running/paused guest uses CONFIG|LIVE flags.

        Verifies the full pipeline: connect the volume, build the config,
        set the cache mode, then attachDeviceFlags with both affect flags.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        get_image.return_value = image_meta
        mock_dom = mock.MagicMock()
        mock_get_domain.return_value = mock_dom
        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
        with test.nested(
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode):
            # Both RUNNING and PAUSED guests must get the LIVE flag.
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                drvr.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])
                mock_get_domain.assert_called_with(instance)
                mock_get_info.assert_called_with(
                    instance,
                    CONF.libvirt.virt_type,
                    test.MatchType(objects.ImageMeta),
                    bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags=flags)
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain):
        """Detaching a volume from a running or paused guest must pass both
        VIR_DOMAIN_AFFECT_CONFIG and VIR_DOMAIN_AFFECT_LIVE to
        detachDeviceFlags, then disconnect the volume backend.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # Minimal domain XML containing the disk that will be detached.
        mock_xml = """<domain>
              <devices>
                <disk type='file'>
                  <source file='/path/to/fake-volume'/>
                  <target dev='vdc' bus='virtio'/>
                </disk>
              </devices>
            </domain>"""
        mock_dom = mock.MagicMock()
        mock_dom.XMLDesc.return_value = mock_xml
        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
        with mock.patch.object(drvr, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                mock_get_domain.return_value = mock_dom
                drvr.detach_volume(connection_info, instance, '/dev/vdc')
                mock_get_domain.assert_called_with(instance)
                # The disk element is re-serialized by the driver, hence the
                # double-quoted attributes and added device="disk" below.
                mock_dom.detachDeviceFlags.assert_called_with("""<disk type="file" device="disk">
  <source file="/path/to/fake-volume"/>
  <target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')
def test_multi_nic(self):
network_info = _fake_network_info(self.stubs, 2)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEqual(len(interfaces), 2)
self.assertEqual(interfaces[0].get('type'), 'bridge')
    def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
                                   exc=ValueError()):
        # Record the mox expectations for one _supports_direct_io() call:
        # an O_DIRECT open of the probe file, a write, then cleanup,
        # optionally raising `exc` at the open or at the write step.
        # NOTE(review): "write_bahavior" is a typo for "write_behavior",
        # and exc=ValueError() is a mutable default argument; both are
        # harmless here (the default is never mutated) but worth cleanup.
        open_behavior = os.open(os.path.join('.', '.directio.test'),
                                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
        if raise_open:
            open_behavior.AndRaise(exc)
        else:
            open_behavior.AndReturn(3)
        # fd 3 is the value AndReturn()ed above for the successful open.
        write_bahavior = os.write(3, mox.IgnoreArg())
        if raise_write:
            write_bahavior.AndRaise(exc)
        else:
            os.close(3)
            os.unlink(3)
    def test_supports_direct_io(self):
        """_supports_direct_io() returns True when an O_DIRECT open/write
        succeeds, False on EINVAL (direct I/O unsupported), and re-raises
        any other error.
        """
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the code-path
        # by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the attribute
            # directly.
            os.O_DIRECT = 16384
            self.addCleanup(delattr, os, 'O_DIRECT')
        einval = OSError()
        einval.errno = errno.EINVAL
        self.mox.StubOutWithMock(os, 'open')
        self.mox.StubOutWithMock(os, 'write')
        self.mox.StubOutWithMock(os, 'close')
        self.mox.StubOutWithMock(os, 'unlink')
        _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io
        # Record five scenarios; the replay order below must match the
        # order in which these expectations were recorded.
        self._behave_supports_direct_io()
        self._behave_supports_direct_io(raise_write=True)
        self._behave_supports_direct_io(raise_open=True)
        self._behave_supports_direct_io(raise_write=True, exc=einval)
        self._behave_supports_direct_io(raise_open=True, exc=einval)
        self.mox.ReplayAll()
        self.assertTrue(_supports_direct_io('.'))
        # Non-EINVAL errors must propagate to the caller.
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertRaises(ValueError, _supports_direct_io, '.')
        # EINVAL simply means direct I/O is unsupported on this path.
        self.assertFalse(_supports_direct_io('.'))
        self.assertFalse(_supports_direct_io('.'))
        self.mox.VerifyAll()
def _check_xml_and_container(self, instance):
instance_ref = objects.Instance(**instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(drvr._uri(), 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
check = [
(lambda t: t.find('.').get('type'), 'lxc'),
(lambda t: t.find('./os/type').text, 'exe'),
(lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
for i, (check, expected_result) in enumerate(check):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
self.assertTrue(len(target) > 0)
def _check_xml_and_disk_prefix(self, instance, prefix):
instance_ref = objects.Instance(**instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
def _get_prefix(p, default):
if p:
return p + 'a'
return default
type_disk_map = {
'qemu': [
(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'xen': [
(lambda t: t.find('.').get('type'), 'xen'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'xvda'))],
'kvm': [
(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'vda'))],
'uml': [
(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./devices/disk/target').get('dev'),
_get_prefix(prefix, 'ubda'))]
}
for (virt_type, checks) in six.iteritems(type_disk_map):
self.flags(virt_type=virt_type, group='libvirt')
if prefix:
self.flags(disk_prefix=prefix, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s != %s failed check %d' %
(check(tree), expected_result, i))
    def _check_xml_and_disk_driver(self, image_meta):
        """Disk <driver> elements must use cache="none" while direct I/O is
        supported and cache="writethrough" once it is not.
        """
        os_open = os.open
        directio_supported = True
        def os_open_stub(path, flags, *args, **kwargs):
            # Simulate EINVAL for O_DIRECT opens when direct I/O is flagged
            # unsupported; otherwise strip O_DIRECT so the real open works
            # on filesystems that lack it.
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)
        self.stubs.Set(os, 'open', os_open_stub)
        @staticmethod
        def connection_supports_direct_io_stub(dirpath):
            # Replaces LibvirtDriver._supports_direct_io (a staticmethod on
            # the class), so the stub must itself be a staticmethod object.
            # It reads directio_supported through the closure, so flipping
            # that local below changes the stub's answer.
            return directio_supported
        self.stubs.Set(libvirt_driver.LibvirtDriver,
            '_supports_direct_io', connection_supports_direct_io_stub)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self.stubs, 1)
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")
        directio_supported = False
        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
block_device_info, wantConfig):
instance_ref = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
block_device_info)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta,
block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
got_disk_targets = tree.findall('./devices/disk/target')
for i in range(len(wantConfig)):
want_device_type = wantConfig[i][0]
want_device_bus = wantConfig[i][1]
want_device_dev = wantConfig[i][2]
got_device_type = got_disks[i].get('device')
got_device_bus = got_disk_targets[i].get('bus')
got_device_dev = got_disk_targets[i].get('dev')
self.assertEqual(got_device_type, want_device_type)
self.assertEqual(got_device_bus, want_device_bus)
self.assertEqual(got_device_dev, want_device_dev)
def _check_xml_and_uuid(self, image_meta):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False, xen_only=False):
        """For each relevant virt type, verify both the connection URI and
        the generated guest XML (os type, kernel/ramdisk presence, sysinfo
        entries, consoles, disks and firewall filter reference); finally
        verify that an explicitly configured connection_uri always wins.
        """
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        xen_vm_mode = vm_mode.XEN
        if expect_xen_hvm:
            xen_vm_mode = vm_mode.HVM
        # virt_type -> (expected URI, list of (extractor, expected) checks).
        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                            [(lambda t: t.find('.').get('type'), 'kvm'),
                             (lambda t: t.find('./os/type').text,
                              vm_mode.HVM),
                             (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                                [(lambda t: t.find('.').get('type'), 'uml'),
                                 (lambda t: t.find('./os/type').text,
                                  vm_mode.UML)]),
                        'xen': ('xen:///',
                                [(lambda t: t.find('.').get('type'), 'xen'),
                                 (lambda t: t.find('./os/type').text,
                                  xen_vm_mode)])}
        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']
        # Extend each virt type's check list with the scenario-dependent
        # expectations (kernel/ramdisk, sysinfo entries, consoles).
        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]
            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)
            if expect_kernel:
                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM and Xen
            # should not produce configuration that results in kernel
            # arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm', 'xen']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)
            if expect_ramdisk:
                check = (lambda t: self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)
            if hypervisor_type in ['qemu', 'kvm']:
                # The five sysinfo entries are checked positionally:
                # manufacturer, product, version, serial, uuid.
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)
            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                    'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                    'type'), 'pty')
                check_list.append(check)
                check = (lambda t: self.relpath(t.findall(
                    './devices/serial/source')[0].get('path')).
                    split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                    'type'), 'pty')
                check_list.append(check)
        # Checks shared by every virt type.
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                 get('file')).split('/')[1], 'disk.rescue'),
                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                 get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                'disk.local')]
        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')
            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                # Exercise the code path for older libvirt without the
                # baseline-CPU expand-features flag.
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
                self.assertEqual(drvr._uri(), expected_uri)
                network_info = _fake_network_info(self.stubs, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)
                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta,
                                          rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))
                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))
                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].replace(':', '')
                fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), drvr)
                instance_filter_name = fw._instance_filter_name(instance_ref,
                                                                nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)
        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.connection_uri
        # and checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEqual(drvr._uri(), testuri)
    def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_rules_for_instance() finishes with timeout.
        # Preparing mocks
        def fake_none(self, *args):
            # Stub used for the firewall setup calls and also for
            # instance_filter_exists: its falsy (None) return keeps the
            # retry loop polling until it times out.
            return
        class FakeTime(object):
            # Accumulates the "slept" time so the total wait can be checked.
            def __init__(self):
                self.counter = 0
            def sleep(self, t):
                self.counter += t
        fake_timer = FakeTime()
        def fake_sleep(t):
            fake_timer.sleep(t)
        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)
        # Start test
        self.mox.ReplayAll()
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            self.stubs.Set(greenthread,
                           'sleep',
                           fake_sleep)
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            # NOTE(review): these assertions only run when the timeout
            # exception is actually raised; if it is not, the test passes
            # without checking anything -- worth tightening with
            # self.fail() after the try block.
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
            self.assertTrue(c1)
            self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                     "amount of time")
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
# No need for the src_compute_info
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, None, compute_info, True)
self.assertThat({"filename": "file",
'image_type': 'default',
'disk_available_mb': 409600,
"disk_over_commit": False,
"block_migration": True},
matchers.DictMatches(return_value))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'disk_available_least': 400,
'cpu_info': 'asdf',
}
filename = "file"
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
# No need for the src_compute_info
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, None, compute_info, False)
self.assertThat({"filename": "file",
"image_type": 'default',
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": None},
matchers.DictMatches(return_value))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
def test_check_can_live_migrate_guest_cpu_none_model(
self, mock_cpu, mock_test_file):
# Tests that when instance.vcpu_model.model is None, the host cpu
# model is used for live migration.
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
instance_ref.vcpu_model.model = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
result = drvr.check_can_live_migrate_destination(
self.context, instance_ref, compute_info, compute_info)
mock_cpu.assert_called_once_with(None, 'asdf')
expected_result = {"filename": 'fake',
"image_type": CONF.libvirt.images_type,
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": None}
self.assertDictEqual(expected_result, result)
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file):
instance_ref = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': jsonutils.dumps({
"vendor": "AMD",
"arch": arch.I686,
"features": ["sse3"],
"model": "Opteron_G3",
"topology": {"cores": 2, "threads": 1, "sockets": 4}
})}
filename = "file"
# _check_cpu_match
mock_cpu.return_value = 1
# mounted_on_same_shared_storage
mock_test_file.return_value = filename
return_value = drvr.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
self.assertThat({"filename": "file",
"image_type": 'default',
"block_migration": False,
"disk_over_commit": False,
"disk_available_mb": None},
matchers.DictMatches(return_value))
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
self.assertRaises(exception.InvalidCPUInfo,
drvr.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
mock_compare.return_value = 5
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
mock_compare):
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virCompareCPU',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_compare.side_effect = not_supported_exc
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
mock_compare):
mock_compare.return_value = 6
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None)
self.assertIsNone(ret)
def test_compare_cpu_virt_type_xen(self):
self.flags(virt_type='xen', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, None)
self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
mock_compare.return_value = 0
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidCPUInfo,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationPreCheckError,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info))
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
objects.Instance(**self.test_instance)
dest_check_data = {"filename": "file",
"block_migration": True,
"disk_over_commit": False,
"disk_available_mb": 1024}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
drvr._cleanup_shared_storage_test_file("file")
self.mox.ReplayAll()
drvr.check_can_live_migrate_destination_cleanup(self.context,
dest_check_data)
    def _mock_can_live_migrate_source(self, block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      is_booted_from_volume=False,
                                      disk_available_mb=1024,
                                      block_device_info=None,
                                      block_device_text=None):
        """Record the mox expectations for every probe that
        check_can_live_migrate_source() makes, in call order, and return
        (instance, dest_check_data, drvr) for the caller to ReplayAll()
        and drive the method under test.  Callers may append further
        expectations before replaying.
        """
        instance = objects.Instance(**self.test_instance)
        dest_check_data = {'filename': 'file',
                           'image_type': 'default',
                           'block_migration': block_migration,
                           'disk_over_commit': False,
                           'disk_available_mb': disk_available_mb}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
        drvr._is_shared_block_storage(instance, dest_check_data,
                block_device_info).AndReturn(is_shared_block_storage)
        self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
        drvr._check_shared_storage_test_file('file').AndReturn(
                is_shared_instance_path)
        self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
        drvr.get_instance_disk_info(instance,
                                    block_device_info=block_device_info).\
                AndReturn(block_device_text)
        self.mox.StubOutWithMock(drvr, '_is_booted_from_volume')
        drvr._is_booted_from_volume(instance, block_device_text).AndReturn(
            is_booted_from_volume)
        return (instance, dest_check_data, drvr)
def test_check_can_live_migrate_source_block_migration(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
block_migration=True)
self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
drvr._assert_dest_node_has_enough_disk(
self.context, instance, dest_check_data['disk_available_mb'],
False, None)
self.mox.ReplayAll()
ret = drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
self.assertIsInstance(ret, dict)
self.assertIn('is_shared_block_storage', ret)
self.assertIn('is_shared_instance_path', ret)
self.assertEqual(ret['is_shared_instance_path'],
ret['is_shared_storage'])
def test_check_can_live_migrate_source_shared_block_storage(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_shared_block_storage=True)
self.mox.ReplayAll()
drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
def test_check_can_live_migrate_source_shared_instance_path(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_shared_instance_path=True)
self.mox.ReplayAll()
drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
def test_check_can_live_migrate_source_non_shared_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
drvr.check_can_live_migrate_source, self.context,
instance, dest_check_data)
def test_check_can_live_migrate_source_shared_block_migration_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
block_migration=True,
is_shared_block_storage=True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
def test_check_can_live_migrate_shared_path_block_migration_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
block_migration=True,
is_shared_instance_path=True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data, None)
def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
        """Block migration must raise MigrationError when the destination
        reports less free disk than the instance requires.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            block_migration=True,
            disk_available_mb=0)
        # Record a second get_instance_disk_info expectation: the disk
        # space check reads the disk info again to compute the space the
        # instance needs.
        drvr.get_instance_disk_info(instance,
                                    block_device_info=None).AndReturn(
                                        '[{"virt_disk_size":2}]')
        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
def test_check_can_live_migrate_source_booted_from_volume(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_booted_from_volume=True,
block_device_text='[]')
self.mox.ReplayAll()
drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
def test_check_can_live_migrate_source_booted_from_volume_with_swap(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_booted_from_volume=True,
block_device_text='[{"path":"disk.swap"}]')
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_has_local_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_booted_from_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'get_instance_disk_info')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def test_check_can_live_migrate_source_block_migration_with_bdm(
self, mock_check, mock_shared_block, mock_get_bdi,
mock_booted_from_volume, mock_has_local, mock_enough):
mock_check.return_value = False
mock_shared_block.return_value = False
bdi = {'block_device_mapping': ['bdm']}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
dest_check_data = {'filename': 'file',
'image_type': 'default',
'block_migration': True,
'disk_over_commit': False,
'disk_available_mb': 100}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationPreCheckError,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data,
block_device_info=bdi)
def _is_shared_block_storage_test_create_mocks(self, disks):
# Test data
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>{}</devices></domain>")
disks_xml = ''
for dsk in disks:
if dsk['type'] is not 'network':
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source {source}='{source_path}'/>"
"<target dev='{target_dev}' bus='virtio'/>"
"</disk>".format(**dsk)])
else:
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source protocol='{source_proto}'"
"name='{source_image}' >"
"<host name='hostname' port='7000'/>"
"<config file='/path/to/file'/>"
"</source>"
"<target dev='{target_dev}'"
"bus='ide'/>".format(**dsk)])
# Preparing mocks
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.XMLDesc = mock.Mock()
mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))
mock_lookup = mock.Mock()
def mock_lookup_side_effect(name):
return mock_virDomain
mock_lookup.side_effect = mock_lookup_side_effect
mock_getsize = mock.Mock()
mock_getsize.return_value = "10737418240"
return (mock_getsize, mock_lookup)
def test_is_shared_block_storage_rbd(self):
self.flags(images_type='rbd', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_instance_disk_info = mock.Mock()
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr._is_shared_block_storage(instance,
{'image_type': 'rbd'},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_lvm(self):
self.flags(images_type='lvm', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, {'image_type': 'lvm'},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_qcow2(self):
    """qcow2-backed images are local files, so they are not shared
    block storage."""
    self.flags(images_type='qcow2', group='libvirt')
    instance = objects.Instance(**self.test_instance)
    block_dev_info = {'block_device_mapping': []}
    disk_info_mock = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, 'get_instance_disk_info', disk_info_mock):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        shared = drvr._is_shared_block_storage(
            instance, {'image_type': 'qcow2'},
            block_device_info=block_dev_info)
        self.assertFalse(shared)
        self.assertEqual(0, disk_info_mock.call_count)
def test_is_shared_block_storage_rbd_only_source(self):
    """RBD on the source alone (destination reports no image_type) is
    not shared block storage."""
    self.flags(images_type='rbd', group='libvirt')
    instance = objects.Instance(**self.test_instance)
    block_dev_info = {'block_device_mapping': []}
    disk_info_mock = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, 'get_instance_disk_info', disk_info_mock):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        shared = drvr._is_shared_block_storage(
            instance, {'is_shared_instance_path': False},
            block_device_info=block_dev_info)
        self.assertFalse(shared)
        self.assertEqual(0, disk_info_mock.call_count)
def test_is_shared_block_storage_rbd_only_dest(self):
    """RBD on the destination alone (source images_type not set to rbd)
    is not shared block storage."""
    instance = objects.Instance(**self.test_instance)
    block_dev_info = {'block_device_mapping': []}
    disk_info_mock = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, 'get_instance_disk_info', disk_info_mock):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        shared = drvr._is_shared_block_storage(
            instance,
            {'image_type': 'rbd', 'is_shared_instance_path': False},
            block_device_info=block_dev_info)
        self.assertFalse(shared)
        self.assertEqual(0, disk_info_mock.call_count)
def test_is_shared_block_storage_volume_backed(self):
    """An instance whose only disk is an attached volume is shared
    block storage."""
    disks = [{'type': 'block',
              'driver': 'raw',
              'source': 'dev',
              'source_path': '/dev/disk',
              'target_dev': 'vda'}]
    block_dev_info = {'block_device_mapping': [
        {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_getsize, mock_lookup = (
        self._is_shared_block_storage_test_create_mocks(disks))
    with mock.patch.object(host.Host, 'get_domain', mock_lookup):
        shared = drvr._is_shared_block_storage(
            instance,
            {'is_volume_backed': True, 'is_shared_instance_path': False},
            block_device_info=block_dev_info)
        self.assertTrue(shared)
        mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_volume_backed_with_disk(self):
    """A volume-backed instance with an additional local file disk is
    NOT shared block storage, and the local disk's size is probed."""
    disks = [{'type': 'block',
              'driver': 'raw',
              'source': 'dev',
              'source_path': '/dev/disk',
              'target_dev': 'vda'},
             {'type': 'file',
              'driver': 'raw',
              'source': 'file',
              'source_path': '/instance/disk.local',
              'target_dev': 'vdb'}]
    block_dev_info = {'block_device_mapping': [
        {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_getsize, mock_lookup = (
        self._is_shared_block_storage_test_create_mocks(disks))
    with test.nested(
            mock.patch.object(os.path, 'getsize', mock_getsize),
            mock.patch.object(host.Host, 'get_domain', mock_lookup)):
        shared = drvr._is_shared_block_storage(
            instance,
            {'is_volume_backed': True, 'is_shared_instance_path': False},
            block_device_info=block_dev_info)
        self.assertFalse(shared)
        mock_getsize.assert_called_once_with('/instance/disk.local')
        mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_nfs(self):
    """A shared instance path whose image backend reports the disk
    files live inside it (e.g. NFS) is shared block storage."""
    block_dev_info = {'block_device_mapping': []}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    backend_mock = mock.MagicMock()
    backend_mock.is_file_in_instance_path.return_value = True
    image_backend_mock = mock.MagicMock()
    image_backend_mock.backend.return_value = backend_mock
    drvr.image_backend = image_backend_mock
    disk_info_mock = mock.Mock()
    with mock.patch.object(drvr, 'get_instance_disk_info', disk_info_mock):
        shared = drvr._is_shared_block_storage(
            'instance', {'is_shared_instance_path': True},
            block_device_info=block_dev_info)
        self.assertTrue(shared)
        self.assertEqual(0, disk_info_mock.call_count)
def test_live_migration_update_graphics_xml(self):
    """The VNC/SPICE listen addresses from migrate_data are rewritten
    into the domain XML handed to migrateToURI2; the libvirtError
    raised by the mocked migration call must propagate out of
    _live_migration_operation.
    """
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    xml_tmpl = ("<domain type='kvm'>"
                "<devices>"
                "<graphics type='vnc' listen='{vnc}'>"
                "<listen address='{vnc}'/>"
                "</graphics>"
                "<graphics type='spice' listen='{spice}'>"
                "<listen address='{spice}'/>"
                "</graphics>"
                "</devices>"
                "</domain>")

    initial_xml = xml_tmpl.format(vnc='1.2.3.4',
                                  spice='5.6.7.8')

    target_xml = xml_tmpl.format(vnc='10.0.0.1',
                                 spice='10.0.0.2')
    # Round-trip through etree so the expected XML is serialized the
    # same way the driver emits it.
    target_xml = etree.tostring(etree.fromstring(target_xml))

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI2")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
        initial_xml)
    vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                         None,
                         target_xml,
                         mox.IgnoreArg(),
                         None,
                         _bandwidth).AndRaise(
                             fakelibvirt.libvirtError("ERR"))

    # start test
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
def test_live_migration_update_volume_xml(self):
    """_live_migration_operation passes the volume dict from
    migrate_data's pre_live_migration_result to _update_xml so the disk
    source paths are rewritten for the destination.
    """
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)
    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'cde.67890.opst-lun-Z')

    # start test
    migrate_data = {'pre_live_migration_result':
        {'volume': {u'58a84f6d-3f0c-4e19-a0af-eb657b790657':
            {'connection_info': {u'driver_volume_type': u'iscsi',
                'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                u'data': {u'access_mode': u'rw', u'target_discovered': False,
                    u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                    u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                    'device_path':
                    u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'disk_info': {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'}}}},
        'graphics_listen_addrs': {}}

    # Mirror how the driver itself extracts the volume mapping.
    pre_live_migrate_data = ((migrate_data or {}).
                             get('pre_live_migration_result', {}))
    volume = pre_live_migrate_data.get('volume')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    test_mock = mock.MagicMock()

    with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \
            mget_info,\
            mock.patch.object(drvr._host, 'get_domain') as mget_domain,\
            mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\
            mock.patch.object(drvr, '_update_xml') as mupdate:

        mget_info.side_effect = exception.InstanceNotFound(
            instance_id='foo')
        mget_domain.return_value = test_mock
        test_mock.XMLDesc.return_value = target_xml
        self.assertFalse(drvr._live_migration_operation(
            self.context, instance_ref, 'dest', False,
            migrate_data, test_mock))
        mupdate.assert_called_once_with(target_xml, volume, None, None)
def test_update_volume_xml(self):
    """_update_volume_xml rewrites a disk's source path and serial from
    the migrating volume's connection info so the XML matches the
    destination's device path.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    initial_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'abc.12345.opst-lun-X')
    target_xml = self.device_xml_tmpl.format(
        device_path='/dev/disk/by-path/'
                    'ip-1.2.3.4:3260-iqn.'
                    'cde.67890.opst-lun-Z')
    target_xml = etree.tostring(etree.fromstring(target_xml))
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
    volume_xml = {'volume': {}}
    volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
    volume_xml['volume'][serial]['connection_info'] = \
        {u'driver_volume_type': u'iscsi',
         'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
         u'data': {u'access_mode': u'rw', u'target_discovered': False,
             u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
             u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
             'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
    volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                 'type': u'disk',
                                                 'dev': u'vdb'}

    connection_info = volume_xml['volume'][serial]['connection_info']
    disk_info = volume_xml['volume'][serial]['disk_info']

    # Canned disk config returned by the mocked _get_volume_config,
    # mirroring the new connection info above.
    conf = vconfig.LibvirtConfigGuestDisk()
    conf.source_device = disk_info['type']
    conf.driver_name = "qemu"
    conf.driver_format = "raw"
    conf.driver_cache = "none"
    conf.target_dev = disk_info['dev']
    conf.target_bus = disk_info['bus']
    conf.serial = connection_info.get('serial')
    conf.source_type = "block"
    conf.source_path = connection_info['data'].get('device_path')

    with mock.patch.object(drvr, '_get_volume_config',
                           return_value=conf):
        # remove_blank_text normalizes whitespace on both sides of the
        # string comparison below.
        parser = etree.XMLParser(remove_blank_text=True)
        xml_doc = etree.fromstring(initial_xml, parser)
        config = drvr._update_volume_xml(xml_doc,
                                         volume_xml['volume'])
        xml_doc = etree.fromstring(target_xml, parser)
        self.assertEqual(etree.tostring(xml_doc), etree.tostring(config))
def test_update_volume_xml_no_serial(self):
    """A disk whose <serial> element is empty never matches a migrating
    volume, so _update_volume_xml leaves the XML unchanged (target
    equals initial).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    xml_tmpl = """
<domain type='kvm'>
<devices>
<disk type='block' device='disk'>
<driver name='qemu' type='raw' cache='none'/>
<source dev='{device_path}'/>
<target bus='virtio' dev='vdb'/>
<serial></serial>
<address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
</disk>
</devices>
</domain>
"""

    initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                              'ip-1.2.3.4:3260-iqn.'
                                              'abc.12345.opst-lun-X')
    target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                             'ip-1.2.3.4:3260-iqn.'
                                             'abc.12345.opst-lun-X')
    target_xml = etree.tostring(etree.fromstring(target_xml))
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
    volume_xml = {'volume': {}}
    volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
    volume_xml['volume'][serial]['connection_info'] = \
        {u'driver_volume_type': u'iscsi',
         'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
         u'data': {u'access_mode': u'rw', u'target_discovered': False,
             u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
             u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
             'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
    volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                 'type': u'disk',
                                                 'dev': u'vdb'}

    connection_info = volume_xml['volume'][serial]['connection_info']
    disk_info = volume_xml['volume'][serial]['disk_info']

    # Canned disk config returned by the mocked _get_volume_config.
    conf = vconfig.LibvirtConfigGuestDisk()
    conf.source_device = disk_info['type']
    conf.driver_name = "qemu"
    conf.driver_format = "raw"
    conf.driver_cache = "none"
    conf.target_dev = disk_info['dev']
    conf.target_bus = disk_info['bus']
    conf.serial = connection_info.get('serial')
    conf.source_type = "block"
    conf.source_path = connection_info['data'].get('device_path')

    with mock.patch.object(drvr, '_get_volume_config',
                           return_value=conf):
        xml_doc = etree.fromstring(initial_xml)
        config = drvr._update_volume_xml(xml_doc,
                                         volume_xml['volume'])
        self.assertEqual(target_xml, etree.tostring(config))
def test_update_volume_xml_no_connection_info(self):
    """A volume entry lacking connection_info/disk_info leaves the
    domain XML untouched."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    device_path = ('/dev/disk/by-path/'
                   'ip-1.2.3.4:3260-iqn.'
                   'abc.12345.opst-lun-X')
    initial_xml = self.device_xml_tmpl.format(device_path=device_path)
    target_xml = etree.tostring(
        etree.fromstring(self.device_xml_tmpl.format(
            device_path=device_path)))
    serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
    volume_xml = {'volume': {serial: {'info1': {}, 'info2': {}}}}
    conf = vconfig.LibvirtConfigGuestDisk()
    with mock.patch.object(drvr, '_get_volume_config',
                           return_value=conf):
        xml_doc = etree.fromstring(initial_xml)
        config = drvr._update_volume_xml(xml_doc, volume_xml['volume'])
        self.assertEqual(target_xml, etree.tostring(config))
@mock.patch.object(fakelibvirt.virDomain, "migrateToURI2")
@mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
def test_live_migration_update_serial_console_xml(self, mock_xml,
                                                  mock_migrate):
    """The serial console listen address from migrate_data is rewritten
    into the domain XML passed to migrateToURI2."""
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_ref = self.test_instance

    xml_tmpl = ("<domain type='kvm'>"
                "<devices>"
                "<console type='tcp'>"
                "<source mode='bind' host='{addr}' service='10000'/>"
                "</console>"
                "</devices>"
                "</domain>")
    initial_xml = xml_tmpl.format(addr='9.0.0.1')

    target_xml = xml_tmpl.format(addr='9.0.0.12')
    target_xml = etree.tostring(etree.fromstring(target_xml))

    # Preparing mocks
    mock_xml.return_value = initial_xml
    mock_migrate.side_effect = fakelibvirt.libvirtError("ERR")

    # start test
    bandwidth = CONF.libvirt.live_migration_bandwidth
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '10.0.0.1', 'spice': '10.0.0.2'},
                     'serial_listen_addr': '9.0.0.12'}}
    dom = fakelibvirt.virDomain
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, dom)
    mock_xml.assert_called_once_with(
        flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
    mock_migrate.assert_called_once_with(
        CONF.libvirt.live_migration_uri % 'dest',
        None, target_xml, mock.ANY, None, bandwidth)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                   create=True)
def test_live_migration_fails_with_serial_console_without_migratable(self):
    """Serial console enabled without VIR_DOMAIN_XML_MIGRATABLE support
    must abort the live migration with MigrationError."""
    self.compute = importutils.import_object(CONF.compute_manager)
    CONF.set_override("enabled", True, "serial_console")
    instance_ref = self.test_instance
    domain = fakelibvirt.virDomain
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.MigrationError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, None, domain)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                   create=True)
def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
    """Without VIR_DOMAIN_XML_MIGRATABLE the driver falls back to plain
    migrateToURI (no XML rewriting); the mocked libvirtError must
    propagate."""
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        _bandwidth).AndRaise(
                            fakelibvirt.libvirtError("ERR"))

    # start test
    # Wildcard listen addresses allow migration even without the
    # MIGRATABLE flag.
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
    """With no pre_live_migration_result in migrate_data the driver
    uses plain migrateToURI; the mocked libvirtError must propagate."""
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        _bandwidth).AndRaise(
                            fakelibvirt.libvirtError("ERR"))

    # start test
    migrate_data = {}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                   create=True)
def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
    """Without MIGRATABLE support, non-wildcard graphics listen
    addresses cannot be rewritten, so the migration must fail with
    MigrationError (migrateToURI is never called)."""
    self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc')
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")

    # start test
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.MigrationError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
def test_live_migration_raises_exception(self):
    """Errors from the libvirt migrate call propagate out of
    _live_migration_operation and the instance's vm/power state is left
    untouched."""
    # Confirms recover method is called when exceptions are raised.
    # Preparing data
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI2")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    # Expect the API matching the fakelibvirt capabilities in use:
    # migrateToURI without MIGRATABLE support, migrateToURI2 with it.
    if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError('ERR'))
    else:
        vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE
                       ).AndReturn(FakeVirtDomain().XMLDesc(flags=0))
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                             None,
                             mox.IgnoreArg(),
                             mox.IgnoreArg(),
                             None,
                             _bandwidth).AndRaise(
                                 fakelibvirt.libvirtError('ERR'))

    # start test
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)

    self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
    self.assertEqual(power_state.RUNNING, instance_ref.power_state)
def test_live_migration_raises_unsupported_config_exception(self):
    """When migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED the
    driver retries with migrateToURI; a second failure there is what
    finally propagates."""
    # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
    # migrateToURI is used instead.

    # Preparing data
    instance_ref = objects.Instance(**self.test_instance)

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
    self.mox.StubOutWithMock(vdmock, 'migrateToURI')
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
        FakeVirtDomain().XMLDesc(flags=0))
    unsupported_config_error = fakelibvirt.libvirtError('ERR')
    unsupported_config_error.err = (
        fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
    # This is the first error we hit but since the error code is
    # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
    vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
                         mox.IgnoreArg(), mox.IgnoreArg(), None,
                         _bandwidth).AndRaise(unsupported_config_error)
    # This is the second and final error that will actually kill the run,
    # we use TestingException to make sure it's not the same libvirtError
    # above.
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(), None,
                        _bandwidth).AndRaise(test.TestingException('oops'))

    graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs': graphics_listen_addrs}}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.mox.StubOutWithMock(
        drvr, '_check_graphics_addresses_can_live_migrate')
    drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
    self.mox.ReplayAll()

    # start test
    self.assertRaises(test.TestingException,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                    mock_get_instance_path,
                                                    mock_exist,
                                                    mock_shutil
                                                    ):
    """On a non-shared instance path the rollback removes the local
    instance directory even when destroy() raises."""
    # destroy method may raise InstanceTerminationFailure or
    # InstancePowerOffFailure, here use their base class Invalid.
    mock_destroy.side_effect = exception.Invalid(reason='just test')
    # NOTE(review): os.path.join discards the first component because
    # the second argument is absolute, so this is just
    # '/fake_instance_uuid'; harmless here since the same value is both
    # mocked and asserted -- confirm intent before "fixing".
    fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                      '/fake_instance_uuid')
    mock_get_instance_path.return_value = fake_instance_path
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    migrate_data = {'is_shared_instance_path': False}
    self.assertRaises(exception.Invalid,
                      drvr.rollback_live_migration_at_destination,
                      "context", "instance", [], None, True, migrate_data)
    mock_exist.assert_called_once_with(fake_instance_path)
    mock_shutil.assert_called_once_with(fake_instance_path)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
                                                mock_get_instance_path,
                                                mock_exist,
                                                mock_shutil
                                                ):
    """With a shared instance path the rollback only destroys the
    domain and never touches the instance directory."""
    migrate_data = {'is_shared_instance_path': True}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.rollback_live_migration_at_destination("context", "instance", [],
                                                None, True, migrate_data)
    mock_destroy.assert_called_once_with("context", "instance", [],
                                         None, True, migrate_data)
    for fs_mock in (mock_get_instance_path, mock_exist, mock_shutil):
        self.assertFalse(fs_mock.called)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths(self, mock_xml):
    """Only writable, non-shareable, non-readonly file/block disks are
    selected for block-migration copy; network disks are skipped."""
    xml = """
<domain>
<name>dummy</name>
<uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
<shareable/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
<readonly/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
<host name="hostname" port="443"/>
</source>
</disk>
</devices>
</domain>"""
    mock_xml.return_value = xml

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
    guest = libvirt_guest.Guest(dom)

    paths = drvr._live_migration_copy_disk_paths(guest)
    self.assertEqual(["/var/lib/nova/instance/123/disk.root",
                      "/dev/mapper/somevol"], paths)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_data_gb_plain(self, mock_paths):
    """Without block migration only the RAM size is counted and disk
    paths are never inspected."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest(
        fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False))
    instance = objects.Instance(**self.test_instance)
    self.assertEqual(2, drvr._live_migration_data_gb(instance, guest,
                                                     False))
    self.assertEqual(0, mock_paths.call_count)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_data_gb_block(self, mock_paths):
    """With block migration the estimate is RAM plus each copied disk,
    sizes rounded up to whole GBs."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
    guest = libvirt_guest.Guest(dom)
    instance = objects.Instance(**self.test_instance)

    def fake_stat(path):
        # Minimal stand-in for os.stat results: only st_size is read.
        class StatResult(object):
            def __init__(self, size):
                self._size = size

            @property
            def st_size(self):
                return self._size

        if path == "/var/lib/nova/instance/123/disk.root":
            return StatResult(10 * units.Gi)
        elif path == "/dev/mapper/somevol":
            return StatResult(1.5 * units.Gi)
        else:
            raise Exception("Should not be reached")

    mock_paths.return_value = ["/var/lib/nova/instance/123/disk.root",
                               "/dev/mapper/somevol"]
    with mock.patch.object(os, "stat") as mock_stat:
        mock_stat.side_effect = fake_stat
        data_gb = drvr._live_migration_data_gb(instance, guest, True)
        # Expecting 2 GB for RAM, plus 10 GB for disk.root
        # and 1.5 GB rounded to 2 GB for somevol, so 14 GB
        self.assertEqual(14, data_gb)
        self.assertEqual(1, mock_paths.call_count)
# Symbolic outcomes for _test_live_migration_monitoring: the monitor is
# expected to post-process (success), recover (failure), or call
# abortJob and then recover (abort).
EXPECT_SUCCESS = 1
EXPECT_FAILURE = 2
EXPECT_ABORT = 3
@mock.patch.object(time, "time")
@mock.patch.object(time, "sleep",
                   side_effect=lambda x: eventlet.sleep(0))
@mock.patch.object(host.DomainJobInfo, "for_domain")
@mock.patch.object(objects.Instance, "save")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(fakelibvirt.virDomain, "abortJob")
def _test_live_migration_monitoring(self,
                                    job_info_records,
                                    time_records,
                                    expect_result,
                                    mock_abort,
                                    mock_running,
                                    mock_save,
                                    mock_job_info,
                                    mock_sleep,
                                    mock_time):
    """Drive _live_migration_monitor through a scripted job sequence.

    :param job_info_records: DomainJobInfo objects returned one at a
        time from the mocked for_domain(). The sentinel strings
        "thread-finish" (fire the migration thread's finish event) and
        "domain-stop" (destroy the fake domain) inject side effects
        mid-sequence instead of being returned.
    :param time_records: fake time.time() values, one consumed per job
        info record; once exhausted a fixed far-future timestamp is
        returned.
    :param expect_result: EXPECT_SUCCESS / EXPECT_FAILURE /
        EXPECT_ABORT -- which of post/recover/abortJob must have run.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
    guest = libvirt_guest.Guest(dom)
    finish_event = eventlet.event.Event()

    def fake_job_info(hostself):
        while True:
            self.assertTrue(len(job_info_records) > 0)
            rec = job_info_records.pop(0)

            # NOTE: isinstance, not type(...) == str, so any string
            # sentinel is recognized.
            if isinstance(rec, str):
                if rec == "thread-finish":
                    finish_event.send()
                elif rec == "domain-stop":
                    dom.destroy()
            else:
                # Advance the fake clock in step with the records.
                if len(time_records) > 0:
                    time_records.pop(0)
                return rec

    def fake_time():
        if len(time_records) > 0:
            return time_records[0]
        else:
            return int(
                datetime.datetime(2001, 1, 20, 20, 1, 0)
                .strftime('%s'))

    mock_job_info.side_effect = fake_job_info
    mock_time.side_effect = fake_time

    dest = mock.sentinel.migrate_dest
    migrate_data = mock.sentinel.migrate_data

    fake_post_method = mock.MagicMock()
    fake_recover_method = mock.MagicMock()
    drvr._live_migration_monitor(self.context, instance,
                                 guest, dest,
                                 fake_post_method,
                                 fake_recover_method,
                                 False,
                                 migrate_data,
                                 dom,
                                 finish_event)

    if expect_result == self.EXPECT_SUCCESS:
        self.assertFalse(fake_recover_method.called,
                         'Recover method called when success expected')
        self.assertFalse(mock_abort.called,
                         'abortJob not called when success expected')
        fake_post_method.assert_called_once_with(
            self.context, instance, dest, False, migrate_data)
    else:
        if expect_result == self.EXPECT_ABORT:
            self.assertTrue(mock_abort.called,
                            'abortJob called when abort expected')
        else:
            self.assertFalse(mock_abort.called,
                             'abortJob not called when failure expected')
        self.assertFalse(fake_post_method.called,
                         'Post method called when success not expected')
        fake_recover_method.assert_called_once_with(
            self.context, instance, dest, False, migrate_data)
def test_live_migration_monitor_success(self):
    """A run seeing NONE -> UNBOUNDED x3 -> COMPLETED is a success."""
    records = [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3))
    records += [
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED)]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_SUCCESS)
def test_live_migration_monitor_success_race(self):
    """Success even when we are too slow to observe the COMPLETED job
    state (domain already gone, job back to NONE)."""
    records = [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3))
    records += [
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_SUCCESS)
def test_live_migration_monitor_failed(self):
    """A job that ends in VIR_DOMAIN_JOB_FAILED triggers recovery."""
    records = [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3))
    records += [
        "thread-finish",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_FAILED)]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_FAILURE)
def test_live_migration_monitor_failed_race(self):
    """Failure even when we are too slow to observe the FAILED job
    state (domain still running, job back to NONE)."""
    records = [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3))
    records += [
        "thread-finish",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_FAILURE)
def test_live_migration_monitor_cancelled(self):
    """A job that ends in VIR_DOMAIN_JOB_CANCELLED counts as a
    failure and triggers recovery."""
    records = [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(3))
    records += [
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED)]
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_FAILURE)
@mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_migration_downtime_steps")
def test_live_migration_monitor_downtime(self, mock_downtime_steps,
                                         mock_set_downtime):
    """The monitor applies each downtime step via migrateSetMaxDowntime
    once its configured delay has elapsed; steps whose delay never
    passes are not applied."""
    self.flags(live_migration_completion_timeout=1000000,
               live_migration_progress_timeout=1000000,
               group='libvirt')
    # We've setup 4 fake downtime steps - first value is the
    # time delay, second is the downtime value
    downtime_steps = [
        (90, 10),
        (180, 50),
        (270, 200),
        (500, 300),
    ]
    mock_downtime_steps.return_value = downtime_steps

    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    # Times are chosen so that only the first 3 downtime
    # steps are needed.
    fake_times = [0, 1, 30, 95, 150, 200, 300]

    # A normal sequence where see all the normal job states
    domain_info_records = [
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]

    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_SUCCESS)

    mock_set_downtime.assert_has_calls([mock.call(10),
                                        mock.call(50),
                                        mock.call(200)])
def test_live_migration_monitor_completion(self):
    """Exceeding live_migration_completion_timeout makes the monitor
    abort the job."""
    self.flags(live_migration_completion_timeout=100,
               live_migration_progress_timeout=1000000,
               group='libvirt')
    # Fake time.time() values, one consumed per job-info record; they
    # blow past the 100s completion timeout.
    fake_times = [40 * step for step in range(9)]
    records = [
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED)
        for _ in range(5))
    records += [
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED)]
    self._test_live_migration_monitoring(records,
                                         fake_times, self.EXPECT_ABORT)
def test_live_migration_monitor_progress(self):
self.flags(live_migration_completion_timeout=1000000,
live_migration_progress_timeout=150,
group='libvirt')
# Each one of these fake times is used for time.time()
# when a new domain_info_records entry is consumed.
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]
# A normal sequence where see all the normal job states
domain_info_records = [
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
]
self._test_live_migration_monitoring(domain_info_records,
fake_times, self.EXPECT_ABORT)
def test_live_migration_downtime_steps(self):
self.flags(live_migration_downtime=400, group='libvirt')
self.flags(live_migration_downtime_steps=10, group='libvirt')
self.flags(live_migration_downtime_delay=30, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
steps = drvr._migration_downtime_steps(3.0)
self.assertEqual([
(0, 37),
(90, 38),
(180, 39),
(270, 42),
(360, 46),
(450, 55),
(540, 70),
(630, 98),
(720, 148),
(810, 238),
(900, 400),
], list(steps))
@mock.patch.object(utils, "spawn")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
def test_live_migration_main(self, mock_running, mock_guest,
mock_monitor, mock_thread):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
dom = fakelibvirt.Domain(drvr._get_connection(),
"<domain><name>demo</name></domain>", True)
guest = libvirt_guest.Guest(dom)
migrate_data = {}
mock_guest.return_value = guest
def fake_post():
pass
def fake_recover():
pass
drvr._live_migration(self.context, instance, "fakehost",
fake_post, fake_recover, False,
migrate_data)
class AnyEventletEvent(object):
def __eq__(self, other):
return type(other) == eventlet.event.Event
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", False,
migrate_data, dom)
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, False,
migrate_data, dom, AnyEventletEvent())
    def _do_test_create_images_and_backing(self, disk_type):
        """Assert _create_images_and_backing creates a disk of the given
        type when the path does not exist and no backing file is needed.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

        # 1 GiB on disk, 20 GiB virtual, no backing file.
        disk_info = {'path': 'foo', 'type': disk_type,
                     'disk_size': 1 * 1024 ** 3,
                     'virt_disk_size': 20 * 1024 ** 3,
                     'backing_file': None}

        # Record expectations: the image is created at its virtual size
        # and the kernel/ramdisk fetch happens with no fallback host.
        libvirt_driver.libvirt_utils.create_image(
            disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
        drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()

        # Pretend nothing exists on disk so creation is attempted.
        self.stubs.Set(os.path, 'exists', lambda *args: False)
        drvr._create_images_and_backing(self.context, self.test_instance,
                                        "/fake/instance/dir", [disk_info])
    def test_create_images_and_backing_qcow2(self):
        # Exercise the shared helper for the qcow2 disk type.
        self._do_test_create_images_and_backing('qcow2')
    def test_create_images_and_backing_raw(self):
        # Exercise the shared helper for the raw disk type.
        self._do_test_create_images_and_backing('raw')
def test_create_images_and_backing_images_not_exist_no_fallback(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
disk_info = [
{u'backing_file': u'fake_image_backing_file',
u'disk_size': 10747904,
u'path': u'disk_path',
u'type': u'qcow2',
u'virt_disk_size': 25165824}]
self.test_instance.update({'user_id': 'fake-user',
'os_type': None,
'project_id': 'fake-project'})
instance = objects.Instance(**self.test_instance)
with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
side_effect=exception.ImageNotFound(
image_id="fake_id")):
self.assertRaises(exception.ImageNotFound,
conn._create_images_and_backing,
self.context, instance,
"/fake/instance/dir", disk_info)
    def test_create_images_and_backing_images_not_exist_fallback(self):
        """When the base image is gone from Glance, the backing file and
        kernel/ramdisk are copied over from the fallback host instead.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        disk_info = [
            {u'backing_file': u'fake_image_backing_file',
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]
        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'kernel_id': 'fake_kernel_id',
                                   'ramdisk_id': 'fake_ramdisk_id',
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)
        # fetch_image fails as if the image had been deleted from Glance,
        # forcing the copy-from-host fallback path.
        with test.nested(
            mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                              side_effect=exception.ImageNotFound(
                                  image_id="fake_id")),
        ) as (copy_image_mock, fetch_image_mock):
            conn._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir", disk_info,
                                            fallback_from_host="fake_host")
            backfile_path = os.path.join(base_dir, 'fake_image_backing_file')
            kernel_path = os.path.join(CONF.instances_path,
                                       self.test_instance['uuid'],
                                       'kernel')
            ramdisk_path = os.path.join(CONF.instances_path,
                                        self.test_instance['uuid'],
                                        'ramdisk')
            # Each missing file should have been pulled from fake_host.
            copy_image_mock.assert_has_calls([
                mock.call(dest=backfile_path, src=backfile_path,
                          host='fake_host', receive=True),
                mock.call(dest=kernel_path, src=kernel_path,
                          host='fake_host', receive=True),
                mock.call(dest=ramdisk_path, src=ramdisk_path,
                          host='fake_host', receive=True)
            ])
            # Glance fetch was still attempted first for every item.
            fetch_image_mock.assert_has_calls([
                mock.call(context=self.context,
                          target=backfile_path,
                          image_id=self.test_instance['image_ref'],
                          user_id=self.test_instance['user_id'],
                          project_id=self.test_instance['project_id'],
                          max_size=25165824),
                mock.call(self.context, kernel_path,
                          self.test_instance['kernel_id'],
                          self.test_instance['user_id'],
                          self.test_instance['project_id']),
                mock.call(self.context, ramdisk_path,
                          self.test_instance['ramdisk_id'],
                          self.test_instance['user_id'],
                          self.test_instance['project_id']),
            ])
@mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_create_images_and_backing_images_exist(self, mock_exists,
mock_fetch_image):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
disk_info = [
{u'backing_file': u'fake_image_backing_file',
u'disk_size': 10747904,
u'path': u'disk_path',
u'type': u'qcow2',
u'virt_disk_size': 25165824}]
self.test_instance.update({'user_id': 'fake-user',
'os_type': None,
'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'project_id': 'fake-project'})
instance = objects.Instance(**self.test_instance)
with mock.patch.object(imagebackend.Image, 'get_disk_size'):
conn._create_images_and_backing(self.context, instance,
'/fake/instance/dir', disk_info)
self.assertFalse(mock_fetch_image.called)
    def test_create_images_and_backing_ephemeral_gets_created(self):
        """Ephemeral disks go through _create_ephemeral while image
        backing files go through fetch_image; both are size-verified.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # One image-backed disk plus one ephemeral disk.
        disk_info = [
            {u'backing_file': u'fake_image_backing_file',
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824},
            {u'backing_file': u'ephemeral_1_default',
             u'disk_size': 393216,
             u'over_committed_disk_size': 1073348608,
             u'path': u'disk_eph_path',
             u'type': u'qcow2',
             u'virt_disk_size': 1073741824}]
        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        instance = objects.Instance(**self.test_instance)
        with test.nested(
            mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'),
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
            mock.patch.object(drvr, '_create_ephemeral'),
            mock.patch.object(imagebackend.Image, 'verify_base_size'),
            mock.patch.object(imagebackend.Image, 'get_disk_size')
        ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
                create_ephemeral_mock, verify_base_size_mock, disk_size_mock):
            drvr._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir",
                                            disk_info)
            # Exactly one ephemeral disk, created in the image cache dir.
            self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
            m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
            self.assertEqual(
                    os.path.join(base_dir, 'ephemeral_1_default'),
                    m_kwargs['target'])
            # Exactly one image fetch, for the non-ephemeral backing file.
            self.assertEqual(len(fetch_image_mock.call_args_list), 1)
            m_args, m_kwargs = fetch_image_mock.call_args_list[0]
            self.assertEqual(
                    os.path.join(base_dir, 'fake_image_backing_file'),
                    m_kwargs['target'])
            # Both files verified against their virtual sizes.
            verify_base_size_mock.assert_has_calls([
                mock.call(os.path.join(base_dir, 'fake_image_backing_file'),
                          25165824),
                mock.call(os.path.join(base_dir, 'ephemeral_1_default'),
                          1073741824)
            ])
    def test_create_images_and_backing_disk_info_none(self):
        """A None disk_info still triggers the kernel/ramdisk fetch but
        creates no disks.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')

        drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()

        drvr._create_images_and_backing(self.context, self.test_instance,
                                        "/fake/instance/dir", None)
    def test_pre_live_migration_works_correctly_mocked(self):
        """Volumes are connected and their disk_info is returned in the
        pre_live_migration result dict.
        """
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': {'serial': '12345', u'data':
            {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
             'mount_device': '/dev/sda'},
            {'connection_info': {'serial': '67890', u'data':
            {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        class FakeNetworkInfo(object):
            def fixed_ips(self):
                return ["test_ip_addr"]

        def fake_none(*args, **kwargs):
            return

        # Image creation is irrelevant to this test; stub it out.
        self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

        instance = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()

        # Creating mocks: expect one _connect_volume per mapped device,
        # with a scsi disk_info derived from its mount_device name.
        self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
        driver.block_device_info_get_mapping(vol
            ).AndReturn(vol['block_device_mapping'])
        self.mox.StubOutWithMock(drvr, "_connect_volume")
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
                }
            drvr._connect_volume(v['connection_info'],
                                 disk_info)
        self.mox.StubOutWithMock(drvr, 'plug_vifs')
        drvr.plug_vifs(mox.IsA(instance), nw_info)

        self.mox.ReplayAll()
        result = drvr.pre_live_migration(
            c, instance, vol, nw_info, None,
            migrate_data={"block_migration": False})

        # The result echoes listen addresses plus per-serial volume info.
        target_ret = {
        'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
        'serial_listen_addr': '127.0.0.1',
        'volume': {
        '12345': {'connection_info': {u'data': {'device_path':
        u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                  'serial': '12345'},
                  'disk_info': {'bus': 'scsi',
                                'dev': 'sda',
                                'type': 'disk'}},
        '67890': {'connection_info': {u'data': {'device_path':
        u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                  'serial': '67890'},
                  'disk_info': {'bus': 'scsi',
                                'dev': 'sdb',
                                'type': 'disk'}}}}
        self.assertEqual(result, target_ret)
def test_pre_live_migration_block_with_config_drive_mocked(self):
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def fake_true(*args, **kwargs):
return True
self.stubs.Set(configdrive, 'required_by', fake_true)
instance = objects.Instance(**self.test_instance)
c = context.get_admin_context()
self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
drvr.pre_live_migration, c, instance, vol, None,
None, {'is_shared_instance_path': False,
'is_shared_block_storage': False})
@mock.patch('nova.virt.driver.block_device_info_get_mapping',
return_value=())
@mock.patch('nova.virt.configdrive.required_by',
return_value=True)
def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
self, mock_required_by, block_device_info_get_mapping):
self.flags(config_drive_format='vfat')
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
res_data = drvr.pre_live_migration(
self.context, instance, vol, [], None,
{'is_shared_instance_path': False,
'is_shared_block_storage': False})
block_device_info_get_mapping.assert_called_once_with(
{'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
]}
)
self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
'vnc': '127.0.0.1'},
'serial_listen_addr': '127.0.0.1',
'volume': {}}, res_data)
    def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
        """Volume-backed migration connects volumes, returns their info
        and creates the instance directory under instances_path.
        """
        # Creating testdata, using temp dir.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            vol = {'block_device_mapping': [
            {'connection_info': {'serial': '12345', u'data':
            {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
             'mount_device': '/dev/sda'},
            {'connection_info': {'serial': '67890', u'data':
            {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'mount_device': '/dev/sdb'}]}
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            def fake_none(*args, **kwargs):
                return

            self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

            class FakeNetworkInfo(object):
                def fixed_ips(self):
                    return ["test_ip_addr"]
            inst_ref = objects.Instance(**self.test_instance)
            c = context.get_admin_context()
            nw_info = FakeNetworkInfo()
            # Creating mocks: one _connect_volume per mapped device with
            # a scsi disk_info derived from the mount_device name.
            self.mox.StubOutWithMock(drvr, "_connect_volume")
            for v in vol['block_device_mapping']:
                disk_info = {
                    'bus': "scsi",
                    'dev': v['mount_device'].rpartition("/")[2],
                    'type': "disk"
                    }
                drvr._connect_volume(v['connection_info'],
                                     disk_info)
            self.mox.StubOutWithMock(drvr, 'plug_vifs')
            drvr.plug_vifs(mox.IsA(inst_ref), nw_info)
            self.mox.ReplayAll()
            migrate_data = {'is_shared_instance_path': False,
                            'is_volume_backed': True,
                            'block_migration': False,
                            'instance_relative_path': inst_ref['name']
                            }
            ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                          migrate_data)
            # Listen addresses plus per-serial volume connection info.
            target_ret = {
            'graphics_listen_addrs': {'spice': '127.0.0.1',
                                      'vnc': '127.0.0.1'},
            'serial_listen_addr': '127.0.0.1',
            'volume': {
            '12345': {'connection_info': {u'data': {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                      'serial': '12345'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sda',
                                    'type': 'disk'}},
            '67890': {'connection_info': {u'data': {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                      'serial': '67890'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sdb',
                                    'type': 'disk'}}}}
            self.assertEqual(ret, target_ret)
            # The (non-shared) instance directory must have been created.
            self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                       inst_ref['name'])))
def test_pre_live_migration_plug_vifs_retry_fails(self):
self.flags(live_migration_retry_count=3)
instance = objects.Instance(**self.test_instance)
def fake_plug_vifs(instance, network_info):
raise processutils.ProcessExecutionError()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(eventlet.greenthread, 'sleep',
lambda x: eventlet.sleep(0))
disk_info_json = jsonutils.dumps({})
self.assertRaises(processutils.ProcessExecutionError,
drvr.pre_live_migration,
self.context, instance, block_device_info=None,
network_info=[], disk_info=disk_info_json)
def test_pre_live_migration_plug_vifs_retry_works(self):
self.flags(live_migration_retry_count=3)
called = {'count': 0}
instance = objects.Instance(**self.test_instance)
def fake_plug_vifs(instance, network_info):
called['count'] += 1
if called['count'] < CONF.live_migration_retry_count:
raise processutils.ProcessExecutionError()
else:
return
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(eventlet.greenthread, 'sleep',
lambda x: eventlet.sleep(0))
disk_info_json = jsonutils.dumps({})
drvr.pre_live_migration(self.context, instance, block_device_info=None,
network_info=[], disk_info=disk_info_json)
def test_pre_live_migration_image_not_created_with_shared_storage(self):
migrate_data_set = [{'is_shared_block_storage': False,
'block_migration': False},
{'is_shared_block_storage': True,
'block_migration': False},
{'is_shared_block_storage': False,
'block_migration': True}]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
# creating mocks
with test.nested(
mock.patch.object(drvr,
'_create_images_and_backing'),
mock.patch.object(drvr,
'ensure_filtering_rules_for_instance'),
mock.patch.object(drvr, 'plug_vifs'),
) as (
create_image_mock,
rules_mock,
plug_mock,
):
disk_info_json = jsonutils.dumps({})
for migrate_data in migrate_data_set:
res = drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=disk_info_json,
migrate_data=migrate_data)
self.assertFalse(create_image_mock.called)
self.assertIsInstance(res, dict)
def test_pre_live_migration_with_not_shared_instance_path(self):
migrate_data = {'is_shared_block_storage': False,
'is_shared_instance_path': False}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
def check_instance_dir(context, instance,
instance_dir, disk_info,
fallback_from_host=False):
self.assertTrue(instance_dir)
# creating mocks
with test.nested(
mock.patch.object(drvr,
'_create_images_and_backing',
side_effect=check_instance_dir),
mock.patch.object(drvr,
'ensure_filtering_rules_for_instance'),
mock.patch.object(drvr, 'plug_vifs'),
) as (
create_image_mock,
rules_mock,
plug_mock,
):
disk_info_json = jsonutils.dumps({})
res = drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=disk_info_json,
migrate_data=migrate_data)
create_image_mock.assert_has_calls(
[mock.call(self.context, instance, mock.ANY, {},
fallback_from_host=instance.host)])
self.assertIsInstance(res, dict)
    def test_get_instance_disk_info_works_correctly(self):
        """get_instance_disk_info reports size, backing file and
        over-commit for raw and qcow2 local disks.
        """
        # Test data
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        # Canned qemu-img output for the qcow2 disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        # Raw disk: no backing file, no over-commit.
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        # qcow2 disk: over-commit = virtual size - actual size.
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
def test_post_live_migration(self):
vol = {'block_device_mapping': [
{'connection_info': {
'data': {'multipath_id': 'dummy1'},
'serial': 'fake_serial1'},
'mount_device': '/dev/sda',
},
{'connection_info': {
'data': {},
'serial': 'fake_serial2'},
'mount_device': '/dev/sdb', }]}
def fake_initialize_connection(context, volume_id, connector):
return {'data': {}}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_connector = {'host': 'fake'}
inst_ref = {'id': 'foo'}
cntx = context.get_admin_context()
# Set up the mock expectations
with test.nested(
mock.patch.object(driver, 'block_device_info_get_mapping',
return_value=vol['block_device_mapping']),
mock.patch.object(drvr, "get_volume_connector",
return_value=fake_connector),
mock.patch.object(drvr._volume_api, "initialize_connection",
side_effect=fake_initialize_connection),
mock.patch.object(drvr, '_disconnect_volume')
) as (block_device_info_get_mapping, get_volume_connector,
initialize_connection, _disconnect_volume):
drvr.post_live_migration(cntx, inst_ref, vol)
block_device_info_get_mapping.assert_has_calls([
mock.call(vol)])
get_volume_connector.assert_has_calls([
mock.call(inst_ref)])
_disconnect_volume.assert_has_calls([
mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'),
mock.call({'data': {}}, 'sdb')])
    def test_get_instance_disk_info_excludes_volumes(self):
        """Disks that correspond to attached volumes (vdc/vdd here) are
        excluded from get_instance_disk_info results.
        """
        # Test data
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdc' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume2'/>"
                    "<target dev='vdd' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        # Canned qemu-img output for the qcow2 disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))

        self.mox.ReplayAll()
        # vdc/vdd are mapped volumes and must not appear in the result.
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': [
                  {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
                  {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance,
                                           block_device_info=info)
        info = jsonutils.loads(info)
        # Only the two local disks remain.
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
    def test_get_instance_disk_info_no_bdinfo_passed(self):
        """Without block device info, block-type disks are guessed to be
        volumes and excluded from the result.
        """
        # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
        # without access to Nova's block device information. We want to make
        # sure that we guess volumes mostly correctly in that case as well
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='block'><driver name='qemu' type='raw'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)

        info = jsonutils.loads(info)
        # Only the file-backed disk is reported; vdb (type=block) is
        # treated as a volume and skipped.
        self.assertEqual(1, len(info))
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
    def test_spawn_with_network_info(self):
        """spawn() succeeds with network info even when the libvirt
        connection lacks VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES.
        """
        # Preparing mocks
        def fake_none(*args, **kwargs):
            return

        def fake_getLibVersion():
            return 9011

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      baselineCPU=fake_baselineCPU)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)
        image_meta = self.test_image_meta

        # Mock out the get_info method of the LibvirtDriver so that the polling
        # in the spawn method of the LibvirtDriver returns immediately
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
        libvirt_driver.LibvirtDriver.get_info(instance
                                              ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING))

        # Start test
        self.mox.ReplayAll()

        # Simulate a libvirt without the baseline-CPU expand-features
        # flag to exercise that fallback path.
        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(imagebackend.Image,
                           'cache',
                           fake_none)

            drvr.spawn(self.context, instance, image_meta, [], 'herp',
                       network_info=network_info)

        # Clean up any directories spawn left behind.
        path = os.path.join(CONF.instances_path, instance['name'])
        if os.path.isdir(path):
            shutil.rmtree(path)

        path = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
        if os.path.isdir(path):
            shutil.rmtree(os.path.join(CONF.instances_path,
                                       CONF.image_cache_subdirectory_name))
def test_spawn_without_image_meta(self):
self.create_image_called = False
def fake_none(*args, **kwargs):
return
def fake_create_image(*args, **kwargs):
self.create_image_called = True
def fake_get_info(instance):
return hardware.InstanceInfo(state=power_state.RUNNING)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, '_get_guest_xml', fake_none)
self.stubs.Set(drvr, '_create_image', fake_create_image)
self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
self.stubs.Set(drvr, 'get_info', fake_get_info)
drvr.spawn(self.context, instance,
self.test_image_meta, [], None)
self.assertTrue(self.create_image_called)
drvr.spawn(self.context, instance,
self.test_image_meta, [], None)
self.assertTrue(self.create_image_called)
    def test_spawn_from_volume_calls_cache(self):
        """The image cache is used only when the instance actually boots
        from its image, not when it is volume-backed.
        """
        self.cache_called_for_disk = False

        def fake_none(*args, **kwargs):
            return

        def fake_cache(*args, **kwargs):
            # Record a cache call for the test image specifically.
            if kwargs.get('image_id') == 'my_fake_image':
                self.cache_called_for_disk = True

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)

        self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        block_device_info = {'root_device_name': '/dev/vda',
                             'block_device_mapping': [
                                {'mount_device': 'vda',
                                 'boot_index': 0}
                                ]
                            }

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)

        drvr.spawn(self.context, instance,
                   self.test_image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)

        # Booted from volume but with placeholder image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)

        drvr.spawn(self.context, instance,
                   self.test_image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)

        # Booted from an image
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)
        drvr.spawn(self.context, instance,
                   self.test_image_meta, [], None)
        self.assertTrue(self.cache_called_for_disk)
    def test_start_lxc_from_volume(self):
        """An LXC guest booted from a volume sets up its container from
        the connected block device and records the rootfs device name.
        """
        self.flags(virt_type="lxc",
                   group='libvirt')

        def check_setup_container(image, container_dir=None):
            # The container must be set up from the block device path
            # that _connect_volume injected into the BDM.
            self.assertIsInstance(image, imgmodel.LocalBlockImage)
            self.assertEqual(image.path, '/dev/path/to/dev')
            return '/dev/nbd1'

        bdm = {
                  'guest_format': None,
                  'boot_index': 0,
                  'mount_device': '/dev/sda',
                  'connection_info': {
                      'driver_volume_type': 'iscsi',
                      'serial': 'afc1',
                      'data': {
                          'access_mode': 'rw',
                          'target_discovered': False,
                          'encrypted': False,
                          'qos_specs': None,
                          'target_iqn': 'iqn: volume-afc1',
                          'target_portal': 'ip: 3260',
                          'volume_id': 'afc1',
                          'target_lun': 1,
                          'auth_password': 'uj',
                          'auth_username': '47',
                          'auth_method': 'CHAP'
                      }
                  },
                  'disk_bus': 'scsi',
                  'device_type': 'disk',
                  'delete_on_termination': False
              }

        def _connect_volume_side_effect(connection_info, disk_info):
            # Simulate the volume driver resolving a device path.
            bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev'

        def _get(key, opt=None):
            return bdm.get(key, opt)

        def getitem(key):
            return bdm[key]

        def setitem(key, val):
            bdm[key] = val

        # Dict-like BDM mock backed by the bdm dict above.
        bdm_mock = mock.MagicMock()
        bdm_mock.__getitem__.side_effect = getitem
        bdm_mock.__setitem__.side_effect = setitem
        bdm_mock.get = _get

        disk_mock = mock.MagicMock()
        disk_mock.source_path = '/dev/path/to/dev'

        block_device_info = {'block_device_mapping': [bdm_mock],
                             'root_device_name': '/dev/sda'}

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/sda'
        instance_ref['ephemeral_gb'] = 0
        instance_ref['uuid'] = uuidutils.generate_uuid()
        inst_obj = objects.Instance(**instance_ref)
        image_meta = {}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, '_connect_volume',
                              side_effect=_connect_volume_side_effect),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=disk_mock),
            mock.patch.object(drvr, 'get_info',
                              return_value=hardware.InstanceInfo(
                              state=power_state.RUNNING)),
            mock.patch('nova.virt.disk.api.setup_container',
                       side_effect=check_setup_container),
            mock.patch('nova.virt.disk.api.teardown_container'),
            mock.patch.object(objects.Instance, 'save')):

            drvr.spawn(self.context, inst_obj, image_meta, [], None,
                       network_info=[],
                       block_device_info=block_device_info)
            # setup_container's return value is persisted for teardown.
            self.assertEqual('/dev/nbd1',
                             inst_obj.system_metadata.get(
                                 'rootfs_device_name'))
def test_spawn_with_pci_devices(self):
    """Spawn an instance that has a PCI device assigned.

    The fake nodeDeviceLookupByName only accepts well-formed PCI
    addresses, so a successful spawn proves the driver looked up the
    device with a valid libvirt node-device name.
    """
    def fake_none(*args, **kwargs):
        return None

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    class FakeLibvirtPciDevice(object):
        # NOTE: "dettach" (sic) matches the spelling of the actual
        # libvirt python binding API, so it must not be "fixed" here.
        def dettach(self):
            return None

        def reset(self):
            return None

    def fake_node_device_lookup_by_name(address):
        # Raw strings so the regex escapes (\d) are passed through
        # verbatim instead of relying on deprecated string escapes.
        pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
                   % dict(hex=r'[\da-f]', oct=r'[0-8]'))
        pattern = re.compile(pattern)
        if pattern.match(address) is None:
            raise fakelibvirt.libvirtError()
        return FakeLibvirtPciDevice()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(drvr, '_create_image', fake_none)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)

    drvr._conn.nodeDeviceLookupByName = \
        fake_node_device_lookup_by_name

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 'my_fake_image'
    instance = objects.Instance(**instance_ref)
    instance['pci_devices'] = objects.PciDeviceList(
        objects=[objects.PciDevice(address='0000:00:00.0')])

    drvr.spawn(self.context, instance,
               self.test_image_meta, [], None)
def test_chown_disk_config_for_instance(self):
    """disk.config is chown'ed to the current uid when it exists.

    Uses mox record/replay, so the expectations below must match the
    driver's calls exactly and in order.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
    fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
    os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
    fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())

    self.mox.ReplayAll()
    drvr._chown_disk_config_for_instance(instance)
def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
    """Common helper for the plain _create_image tests.

    :param os_type: os_type set on the instance (may be '' or None)
    :param filename: expected cache filename for the 20G ephemeral disk
    :param mkfs: when True, register a custom mkfs command for os_type
                 so the ephemeral filename includes the command hash
    """
    gotFiles = []

    def fake_image(self, instance, name, image_type=''):
        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name, is_block_dev=False):
                self.path = os.path.join(instance['name'], name)
                self.is_block_dev = is_block_dev

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def resize_image(self, size):
                pass

            def cache(self, fetch_func, filename, size=None,
                      *args, **kwargs):
                # Record what the driver asked the backend to cache
                # instead of touching the filesystem.
                gotFiles.append({'filename': filename,
                                 'size': size})

            def snapshot(self, name):
                pass

        return FakeImage(instance, name)

    def fake_none(*args, **kwargs):
        return

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    # Stop 'libvirt_driver._create_image' touching filesystem
    self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                   fake_image)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    instance['os_type'] = os_type

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    if mkfs:
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})

    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    drvr._create_image(context, instance, disk_info['mapping'])
    drvr._get_guest_xml(self.context, instance, None,
                        disk_info, image_meta)

    # Expect the root disk (cached under the sha1 of the image ref '1')
    # plus the 20G ephemeral disk under the supplied filename.
    wantFiles = [
        {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
         'size': 10 * units.Gi},
        {'filename': filename,
         'size': 20 * units.Gi},
        ]
    self.assertEqual(gotFiles, wantFiles)
def test_create_image_plain_os_type_blank(self):
    """A blank os_type falls back to the default ephemeral filename."""
    self._test_create_image_plain('', self._EPHEMERAL_20_DEFAULT, False)
def test_create_image_plain_os_type_none(self):
    """A None os_type falls back to the default ephemeral filename."""
    self._test_create_image_plain(None, self._EPHEMERAL_20_DEFAULT, False)
def test_create_image_plain_os_type_set_no_fs(self):
    """An os_type with no registered mkfs command keeps the default name."""
    self._test_create_image_plain('test', self._EPHEMERAL_20_DEFAULT, False)
def test_create_image_plain_os_type_set_with_fs(self):
    """A registered mkfs command adds its hash to the ephemeral name."""
    mkfs_command = 'mkfs.ext4 --label %(fs_label)s %(target)s'
    expected_name = 'ephemeral_20_%s' % utils.get_hash_str(mkfs_command)[:7]
    self._test_create_image_plain(os_type='test',
                                  filename=expected_name,
                                  mkfs=True)
def _create_image_helper(self, callback, suffix=''):
    """Common helper for _create_image tests that tweak the instance.

    :param callback: called with the instance_ref dict before the
                     Instance object is built, to customize the test
    :param suffix: passed through to _create_image (e.g. '.rescue')
    :returns: (gotFiles, imported_files) recording the backend cache()
              and import_file() calls made by the driver
    """
    gotFiles = []
    imported_files = []

    def fake_image(self, instance, name, image_type=''):
        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name, is_block_dev=False):
                self.path = os.path.join(instance['name'], name)
                self.is_block_dev = is_block_dev

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def resize_image(self, size):
                pass

            def cache(self, fetch_func, filename, size=None,
                      *args, **kwargs):
                # Record cache requests instead of touching the
                # filesystem.
                gotFiles.append({'filename': filename,
                                 'size': size})

            def import_file(self, instance, local_filename,
                            remote_filename):
                # Record config-drive imports into the backend store.
                imported_files.append((local_filename, remote_filename))

            def snapshot(self, name):
                pass

        return FakeImage(instance, name)

    def fake_none(*args, **kwargs):
        return

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    # Stop 'libvirt_driver._create_image' touching filesystem
    self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                   fake_image)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    # NOTE(mikal): use this callback to tweak the instance to match
    # what you're trying to test
    callback(instance_ref)
    instance = objects.Instance(**instance_ref)
    # Turn on some swap to exercise that codepath in _create_image
    instance.flavor.swap = 500

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none)
    self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                   'make_drive', fake_none)

    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    drvr._create_image(context, instance, disk_info['mapping'],
                       suffix=suffix)
    drvr._get_guest_xml(self.context, instance, None,
                        disk_info, image_meta)

    return gotFiles, imported_files
def test_create_image_with_swap(self):
    """A flavor with 500MB of swap produces a swap_500 backing file."""
    def _enable_swap(instance_ref):
        # Turn on some swap to exercise that codepath in _create_image
        instance_ref['system_metadata']['instance_type_swap'] = 500

    cached_files, _ = self._create_image_helper(_enable_swap)
    expected_files = [
        {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
         'size': 10 * units.Gi},
        {'filename': self._EPHEMERAL_20_DEFAULT,
         'size': 20 * units.Gi},
        {'filename': 'swap_500',
         'size': 500 * units.Mi},
        ]
    self.assertEqual(expected_files, cached_files)
def test_create_image_with_configdrive(self):
    """A config drive is built and imported into the image backend."""
    def _enable_configdrive(instance_ref):
        instance_ref['config_drive'] = 'true'

    _, imports = self._create_image_helper(_enable_configdrive)
    local_name, remote_name = imports[0]
    self.assertTrue(local_name.endswith('/disk.config'))
    self.assertEqual('disk.config', remote_name)
def test_create_image_with_configdrive_rescue(self):
    """A rescue config drive carries the '.rescue' suffix when imported."""
    def _enable_configdrive(instance_ref):
        instance_ref['config_drive'] = 'true'

    _, imports = self._create_image_helper(_enable_configdrive,
                                           suffix='.rescue')
    local_name, remote_name = imports[0]
    self.assertTrue(local_name.endswith('/disk.config.rescue'))
    self.assertEqual('disk.config.rescue', remote_name)
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
                   side_effect=exception.ImageNotFound(image_id='fake-id'))
def test_create_image_not_exist_no_fallback(self, mock_cache):
    """Without fallback_from_host a missing image propagates the error."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    inst = objects.Instance(**self.test_instance)
    meta = objects.ImageMeta.from_dict(self.test_image_meta)
    mapping = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                      inst,
                                      meta)['mapping']
    self.assertRaises(exception.ImageNotFound,
                      drvr._create_image,
                      self.context, inst, mapping)
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
def test_create_image_not_exist_fallback(self, mock_cache):
    """A missing image is copied from fallback_from_host.

    The first cache() call raises ImageNotFound; the driver is expected
    to retry after copying the image from the fallback host, so the
    side_effect rewires itself for the second call.
    """
    def side_effect(fetch_func, filename, size=None, *args, **kwargs):
        def second_call(fetch_func, filename, size=None, *args, **kwargs):
            # call copy_from_host ourselves because we mocked image.cache()
            fetch_func('fake-target', 'fake-max-size')
            # further calls have no side effect
            mock_cache.side_effect = None
        mock_cache.side_effect = second_call
        # raise an error only the first call
        raise exception.ImageNotFound(image_id='fake-id')

    mock_cache.side_effect = side_effect
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)

    with mock.patch.object(libvirt_driver.libvirt_utils,
                           'copy_image') as mock_copy:
        drvr._create_image(self.context, instance, disk_info['mapping'],
                           fallback_from_host='fake-source-host')
        # The fetch_func invoked above must translate into a remote
        # copy_image pull from the fallback host.
        mock_copy.assert_called_once_with(src='fake-target',
                                          dest='fake-target',
                                          host='fake-source-host',
                                          receive=True)
@mock.patch.object(utils, 'execute')
def test_create_ephemeral_specified_fs(self, mock_exec):
    """An explicit specified_fs wins over default_ephemeral_format."""
    self.flags(default_ephemeral_format='ext3')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20,
                           specified_fs='ext4')
    # mkfs must be invoked with the requested ext4, not the default.
    mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
                                      'myVol', '/dev/something',
                                      run_as_root=True)
def test_create_ephemeral_specified_fs_not_valid(self):
    """An ephemeral BDM with an unsupported guest_format is rejected.

    _create_image must raise InvalidBDMFormat when the requested
    filesystem ('dummy') cannot be created.
    """
    # Use self.flags rather than a bare CONF.set_override so the
    # override is automatically reverted after the test.
    self.flags(default_ephemeral_format='ext4')
    ephemerals = [{'device_type': 'disk',
                   'disk_bus': 'virtio',
                   'device_name': '/dev/vdb',
                   'guest_format': 'dummy',
                   'size': 1}]
    block_device_info = {
            'ephemerals': ephemerals}
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    # Drop the default ephemeral so only the BDM-provided one remains.
    disk_info['mapping'].pop('disk.local')

    with test.nested(
        mock.patch.object(utils, 'execute'),
        mock.patch.object(drvr, 'get_info'),
        mock.patch.object(drvr, '_create_domain_and_network'),
        mock.patch.object(imagebackend.Image, 'verify_base_size'),
        mock.patch.object(imagebackend.Image, 'get_disk_size')):
        self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                          context, instance, disk_info['mapping'],
                          block_device_info=block_device_info)
def test_create_ephemeral_default(self):
    """With no config override, ephemeral disks are formatted ext4.

    mox record/replay: the utils.execute expectation must match the
    driver's call exactly.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20)
def test_create_ephemeral_with_conf(self):
    """default_ephemeral_format from config drives the mkfs type."""
    # Use self.flags rather than a bare CONF.set_override so the
    # override is automatically reverted after the test.
    self.flags(default_ephemeral_format='ext4')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_ephemeral_with_arbitrary(self):
    """A custom _MKFS_COMMAND template for the os_type is honored.

    mox record/replay: the utils.execute expectation must match the
    expanded template exactly.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                   {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                  run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_ephemeral_with_ext3(self):
    """An ext3 _MKFS_COMMAND template for the os_type is honored.

    mox record/replay: the utils.execute expectation must match the
    expanded template exactly.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                   {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                  run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_swap_default(self):
    """_create_swap runs mkswap on the target device (not as root)."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkswap', '/dev/something', run_as_root=False)
    self.mox.ReplayAll()

    drvr._create_swap('/dev/something', 1, max_size=20)
def test_get_console_output_file(self):
    """Console output is read from the file console and truncated.

    With MAX_CONSOLE_BYTES lowered to 5, only the last 5 bytes of the
    11-byte log should be returned.
    """
    fake_libvirt_utils.files['console.log'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        console_log = '%s/console.log' % (console_dir)
        # Domain XML pointing the <console type='file'> at console_log.
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % console_log

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            with mock.patch('os.path.exists', return_value=True):
                output = drvr.get_console_output(self.context, instance)
        finally:
            # Always restore the module-level constant for other tests.
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        self.assertEqual('67890', output)
def test_get_console_output_file_missing(self):
    """A missing console log file yields empty output, not an error."""
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_log = os.path.join(tmpdir, instance['name'],
                                   'non-existent.log')
        # Domain XML pointing the <console type='file'> at a path that
        # os.path.exists (mocked below) reports as absent.
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % console_log

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch('os.path.exists', return_value=False):
            output = drvr.get_console_output(self.context, instance)

        self.assertEqual('', output)
def test_get_console_output_pty(self):
    """Console output from a pty console is flushed and truncated.

    With MAX_CONSOLE_BYTES lowered to 5, only the last 5 bytes of the
    11-byte pty content should be returned.
    """
    fake_libvirt_utils.files['pty'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        pty_file = '%s/fake_pty' % (console_dir)
        # Domain XML pointing the <console type='pty'> at pty_file.
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='pty'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % pty_file

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        def _fake_flush(self, fake_pty):
            return 'foo'

        def _fake_append_to_file(self, data, fpath):
            return 'pty'

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
        # Stub via self.stubs so the class attributes are restored after
        # the test; plain assignment would leak into subsequent tests.
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_flush_libvirt_console', _fake_flush)
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_append_to_file', _fake_append_to_file)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            output = drvr.get_console_output(self.context, instance)
        finally:
            # Always restore the module-level constant for other tests.
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        self.assertEqual('67890', output)
def test_get_host_ip_addr(self):
    """get_host_ip_addr returns the configured my_ip."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(conn.get_host_ip_addr(), CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warn')
@mock.patch('nova.compute.utils.get_machine_ips')
def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
    """A warning is logged when my_ip is not bound to any interface."""
    mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.get_host_ip_addr()
    # The assertion pins the exact log message and arguments.
    mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                     u'not found on any of the '
                                     u'interfaces: %(ifaces)s',
                                     {'ifaces': '8.8.8.8, 75.75.75.75',
                                      'my_ip': mock.ANY})
def test_conn_event_handler(self):
    """A failed libvirt connection in init_host disables the service."""
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = False

    # Make every connection attempt raise a libvirt internal error.
    with test.nested(
        mock.patch.object(drvr._host, "_connect",
                          side_effect=fakelibvirt.make_libvirtError(
                              fakelibvirt.libvirtError,
                              "Failed to connect to host",
                              error_code=
                              fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        # verify that the driver registers for the close callback
        # and re-connects after receiving the callback
        self.assertRaises(exception.HypervisorUnavailable,
                          drvr.init_host,
                          "wibble")
        self.assertTrue(service_mock.disabled)
def test_command_with_broken_connection(self):
    """Driver calls after the connection breaks raise and disable.

    init_host succeeds past its mocked checks, but any later operation
    (get_num_instances) that needs the broken connection must raise
    HypervisorUnavailable and mark the service disabled.
    """
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = False

    # Make every connection attempt raise a libvirt internal error.
    with test.nested(
        mock.patch.object(drvr._host, "_connect",
                          side_effect=fakelibvirt.make_libvirtError(
                              fakelibvirt.libvirtError,
                              "Failed to connect to host",
                              error_code=
                              fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(host.Host, "has_min_version",
                          return_value=True),
        mock.patch.object(drvr, "_do_quality_warnings",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        drvr.init_host("wibble")
        self.assertRaises(exception.HypervisorUnavailable,
                          drvr.get_num_instances)
        self.assertTrue(service_mock.disabled)
def test_service_resume_after_broken_connection(self):
    """A service disabled by a broken connection is re-enabled.

    With the connection working again, a successful driver call should
    clear both the disabled flag and the disabled_reason.
    """
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    service_mock = mock.MagicMock()
    # Start from a disabled service, as left by a prior failure.
    service_mock.disabled.return_value = True
    with test.nested(
        mock.patch.object(drvr._host, "_connect",
                          return_value=mock.MagicMock()),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(host.Host, "has_min_version",
                          return_value=True),
        mock.patch.object(drvr, "_do_quality_warnings",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        drvr.init_host("wibble")
        drvr.get_num_instances()
        # Split assertions so a failure reports which condition broke
        # (the original compound assertTrue hid that).
        self.assertFalse(service_mock.disabled)
        self.assertIsNone(service_mock.disabled_reason)
@mock.patch.object(objects.Instance, 'save')
def test_immediate_delete(self, mock_save):
    """destroy on an already-gone domain still cleans up and saves."""
    def _raise_not_found(instance):
        raise exception.InstanceNotFound(instance_id=instance.uuid)

    def _noop_delete_files(instance):
        pass

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', _raise_not_found)
    self.stubs.Set(drvr, 'delete_instance_files', _noop_delete_files)

    inst = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, inst, {})
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
@mock.patch.object(objects.Instance, 'save', autospec=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
@mock.patch.object(driver, 'block_device_info_get_mapping')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
                               mock_disconnect_volume,
                               mock_delete_instance_files, mock_destroy,
                               mock_inst_save, mock_inst_obj_load_attr,
                               mock_get_by_uuid, volume_fail=False):
    """Common helper: destroy an instance that has an attached volume.

    :param volume_fail: when True, make _disconnect_volume "fail"
    """
    instance = objects.Instance(self.context, **self.test_instance)
    vol = {'block_device_mapping': [
           {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

    mock_mapping.return_value = vol['block_device_mapping']
    mock_delete_instance_files.return_value = True
    mock_get_by_uuid.return_value = instance
    if volume_fail:
        # NOTE(review): this sets return_value to an exception
        # *instance*, so _disconnect_volume never actually raises.
        # If the intent is to simulate a failing disconnect,
        # side_effect is probably what was meant — confirm whether
        # destroy() tolerates the raise before changing this.
        mock_disconnect_volume.return_value = (
            exception.VolumeNotFound('vol'))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.destroy(self.context, instance, [], vol)
def test_destroy_removes_disk(self):
    # Happy path: volume disconnect succeeds during destroy.
    self._test_destroy_removes_disk(volume_fail=False)
def test_destroy_removes_disk_volume_fails(self):
    # Destroy should still complete when the volume disconnect fails.
    self._test_destroy_removes_disk(volume_fail=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
                                  mock_unplug_vifs):
    """destroy with destroy_disks=False leaves instance files alone."""
    fake_inst = fake_instance.fake_instance_obj(
        None, name='instancename', id=1,
        uuid='875a8070-d0b9-4949-8b31-104d125c9a64')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    conn.destroy(self.context, fake_inst, [], None, False)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, 'get_domain')
def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                              mock_teardown_container,
                                              mock_cleanup):
    """Destroying an LXC instance tears down its container."""
    self.flags(virt_type='lxc', group='libvirt')
    fake_domain = FakeVirtDomain()

    def destroy_side_effect(*args, **kwargs):
        # Flip the fake domain to SHUTDOWN so the driver sees the
        # destroy as complete.
        fake_domain._info[0] = power_state.SHUTDOWN

    with mock.patch.object(fake_domain, 'destroy',
           side_effect=destroy_side_effect) as mock_domain_destroy:
        mock_get_domain.return_value = fake_domain
        instance = objects.Instance(**self.test_instance)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        # The domain is looked up twice: once to destroy, once to
        # confirm it is gone.
        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_domain_destroy.assert_called_once_with()
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, 'get_domain')
def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
        mock_get_domain, mock_teardown_container, mock_cleanup):
    """The LXC container is torn down even when the domain is gone."""
    self.flags(virt_type='lxc', group='libvirt')
    instance = objects.Instance(**self.test_instance)
    inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
    mock_get_domain.side_effect = inf_exception

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    network_info = []
    drvr.destroy(self.context, instance, network_info, None, False)

    # The domain lookup is attempted twice and fails both times, yet
    # teardown/cleanup must still run.
    mock_get_domain.assert_has_calls([mock.call(instance),
                                      mock.call(instance)])
    mock_teardown_container.assert_called_once_with(instance)
    mock_cleanup.assert_called_once_with(self.context, instance,
                                         network_info, None, False,
                                         None)
def test_reboot_different_ids(self):
    """A soft reboot succeeds when the domain ID changes.

    mox record/replay: the domain reports a new ID after shutdown,
    which the driver treats as a completed soft reboot, so a new
    domain is created (reboot_create_called) and no hard reboot runs.
    """
    class FakeLoopingCall(object):
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_create_called = False

    # Mock domain
    mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
    # Recorded in the exact order the driver will call: running state,
    # ID before shutdown (twice), shutdown, crashed state, new ID.
    mock_domain.info().AndReturn(
        (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.shutdown()
    mock_domain.info().AndReturn(
        (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple)
    mock_domain.ID().AndReturn('some_other_fake_id')
    mock_domain.ID().AndReturn('some_other_fake_id')

    self.mox.ReplayAll()

    def fake_get_domain(instance):
        return mock_domain

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, '_create_domain', fake_create_domain)
    self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                   lambda *a, **k: FakeLoopingCall())
    self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
    drvr.reboot(None, instance, [], 'SOFT')
    self.assertTrue(self.reboot_create_called)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                         mock_sleep, mock_loopingcall,
                         mock_get_instance_pci_devs):
    """A soft reboot falls back to hard reboot when the ID is unchanged.

    The domain keeps the same ID after shutdown, so within the
    wait_soft_reboot_seconds window the driver concludes the soft
    reboot failed and performs a hard reboot instead.
    """
    class FakeLoopingCall(object):
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_hard_reboot_called = False

    # Mock domain
    mock_domain = mock.Mock(fakelibvirt.virDomain)
    return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                     (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
    mock_domain.info.side_effect = return_values
    # Same ID before and after shutdown => soft reboot "did not happen".
    mock_domain.ID.return_value = 'some_fake_id'
    mock_domain.shutdown.side_effect = mock.Mock()

    def fake_hard_reboot(*args, **kwargs):
        self.reboot_hard_reboot_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_get_domain.return_value = mock_domain
    mock_hard_reboot.side_effect = fake_hard_reboot
    mock_loopingcall.return_value = FakeLoopingCall()
    mock_get_instance_pci_devs.return_value = []
    drvr.reboot(None, instance, [], 'SOFT')
    self.assertTrue(self.reboot_hard_reboot_called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                       mock_hard_reboot):
    # Tests that a hard reboot is performed when a soft reboot results
    # in raising a libvirtError.
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')

    # setup mocks
    mock_virDomain = mock.Mock(fakelibvirt.virDomain)
    mock_virDomain.info.return_value = (
        (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_virDomain.ID.return_value = 'some_fake_id'
    # The soft shutdown attempt blows up with a libvirtError.
    mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    context = None
    instance = objects.Instance(**self.test_instance)
    network_info = []
    mock_get_domain.return_value = mock_virDomain

    drvr.reboot(context, instance, network_info, 'SOFT')
    # Without this assertion the test passed even if no hard reboot
    # fallback ever happened.
    self.assertTrue(mock_hard_reboot.called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def _test_resume_state_on_host_boot_with_state(self, state,
                                               mock_get_domain,
                                               mock_hard_reboot):
    """Common helper: resume_state_on_host_boot for a given power state.

    Asserts that a hard reboot happens exactly for states outside the
    ignored set (RUNNING, SUSPENDED, NOSTATE, PAUSED).
    """
    mock_virDomain = mock.Mock(fakelibvirt.virDomain)
    mock_virDomain.info.return_value = ([state, None, None, None, None])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.return_value = mock_virDomain
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)

    drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                   block_device_info=None)

    ignored_states = (power_state.RUNNING,
                      power_state.SUSPENDED,
                      power_state.NOSTATE,
                      power_state.PAUSED)
    self.assertEqual(mock_hard_reboot.called, state not in ignored_states)
def test_resume_state_on_host_boot_with_running_state(self):
    # RUNNING is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_suspended_state(self):
    # SUSPENDED is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
def test_resume_state_on_host_boot_with_paused_state(self):
    # PAUSED is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
def test_resume_state_on_host_boot_with_nostate(self):
    # NOSTATE is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
    # SHUTDOWN is not an ignored state: a hard reboot is expected.
    # (Previously this passed RUNNING, duplicating the running-state
    # test and leaving the shutdown path untested.)
    self._test_resume_state_on_host_boot_with_state(power_state.SHUTDOWN)
def test_resume_state_on_host_boot_with_crashed_state(self):
    # CRASHED is not an ignored state: a hard reboot is expected.
    self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
        self, mock_get_domain, mock_hard_reboot):
    """An instance unknown to libvirt is hard-rebooted on host boot."""
    instance = objects.Instance(**self.test_instance)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.side_effect = exception.InstanceNotFound(
        instance_id='fake')
    drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
                                   block_device_info=None)

    mock_hard_reboot.assert_called_once_with(self.context,
                                             instance, [], None)
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot(self, mock_destroy, mock_get_disk_info,
                     mock_get_instance_disk_info, mock_get_guest_xml,
                     mock_create_images_and_backing,
                     mock_create_domain_and_network, mock_get_info):
    """_hard_reboot recreates the domain with the right disk info.

    Two distinct "disk info" values flow through _hard_reboot; this
    test pins which one each collaborator must receive.
    """
    self.context.auth_token = True  # any non-None value will suffice
    instance = objects.Instance(**self.test_instance)
    instance_path = libvirt_utils.get_instance_path(instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None

    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # get_info: first SHUTDOWN (destroy finished), then RUNNING
    # (reboot finished).
    return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                     hardware.InstanceInfo(state=power_state.RUNNING)]
    mock_get_info.side_effect = return_values

    backing_disk_info = [{"virt_disk_size": 2}]

    mock_get_disk_info.return_value = mock.sentinel.disk_info
    mock_get_guest_xml.return_value = dummyxml
    mock_get_instance_disk_info.return_value = backing_disk_info

    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)

    # make sure that _create_images_and_backing is passed the disk_info
    # returned from _get_instance_disk_info and not the one that is in
    # scope from blockinfo.get_disk_info
    mock_create_images_and_backing.assert_called_once_with(self.context,
        instance, instance_path, backing_disk_info)

    # make sure that _create_domain_and_network is passed the disk_info
    # returned from blockinfo.get_disk_info and not the one that's
    # returned from _get_instance_disk_info
    mock_create_domain_and_network.assert_called_once_with(self.context,
        dummyxml, instance, network_info, mock.sentinel.disk_info,
        block_device_info=block_device_info,
        reboot=True, vifs_already_plugged=True)
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
@mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot_does_not_call_glance_show(self,
        mock_destroy, mock_get_disk_info, mock_get_guest_config,
        mock_get_instance_path, mock_write_to_file,
        mock_get_instance_disk_info, mock_create_images_and_backing,
        mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
        mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
    """For a hard reboot, we shouldn't need an additional call to glance
    to get the image metadata.
    This is important for automatically spinning up instances on a
    host-reboot, since we won't have a user request context that'll allow
    the Glance request to go through. We have to rely on the cached image
    metadata, instead.
    https://bugs.launchpad.net/nova/+bug/1339386
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    network_info = mock.MagicMock()
    block_device_info = mock.MagicMock()
    mock_get_disk_info.return_value = {}
    mock_get_guest_config.return_value = mock.MagicMock()
    mock_get_instance_path.return_value = '/foo'
    mock_looping_call.return_value = mock.MagicMock()
    # Spy on the image API: the assertion below is that it is NEVER hit.
    drvr._image_api = mock.MagicMock()
    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)
    self.assertFalse(drvr._image_api.get.called)
    # The instance directory must still be (re)created during the reboot.
    mock_ensure_tree.assert_called_once_with('/foo')
def test_suspend(self):
    """suspend() managed-saves the domain without probing disks or
    deleting ephemeral crypt volumes when there is no ephemeral key.
    """
    guest = libvirt_guest.Guest(FakeVirtDomain(id=1))
    dom = guest._domain
    instance = objects.Instance(**self.test_instance)
    # No ephemeral key -> no dmcrypt volumes should be deleted.
    instance.ephemeral_key_uuid = None
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Patches are applied to the inner function so they can target the
    # just-created driver/guest instances.
    @mock.patch.object(dmcrypt, 'delete_volume')
    @mock.patch.object(conn, '_get_instance_disk_info', return_value=[])
    @mock.patch.object(conn, '_detach_sriov_ports')
    @mock.patch.object(conn, '_detach_pci_devices')
    @mock.patch.object(pci_manager, 'get_instance_pci_devs',
                       return_value='pci devs')
    @mock.patch.object(conn._host, 'get_guest', return_value=guest)
    def suspend(mock_get_guest, mock_get_instance_pci_devs,
                mock_detach_pci_devices, mock_detach_sriov_ports,
                mock_get_instance_disk_info, mock_delete_volume):
        mock_managedSave = mock.Mock()
        dom.managedSave = mock_managedSave
        conn.suspend(self.context, instance)
        # managedSave(0) is the libvirt call that actually suspends.
        mock_managedSave.assert_called_once_with(0)
        self.assertEqual(mock_get_instance_disk_info.called, False)
        mock_delete_volume.assert_has_calls([mock.call(disk['path'])
            for disk in mock_get_instance_disk_info.return_value], False)
    suspend()
@mock.patch.object(time, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
@mock.patch.object(host.Host, 'get_domain')
def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                         mock_sleep, seconds_to_shutdown,
                         timeout, retry_interval,
                         shutdown_attempts, succeeds):
    """Drive _clean_shutdown against a scripted domain.

    Builds per-poll info() return values: RUNNING for
    min(seconds_to_shutdown, timeout) polls, then SHUTDOWN if the guest
    stops before the timeout.  Verifies the result flag and the number
    of shutdown() attempts issued.
    """
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    shutdown_count = []
    # Mock domain
    mock_domain = mock.Mock(fakelibvirt.virDomain)
    return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
    # NOTE: list.append returns None, so return_shutdowns is a list of
    # Nones; the side effect we care about is growing shutdown_count
    # eagerly, here at script-construction time, once per expected call.
    return_shutdowns = [shutdown_count.append("shutdown")]
    retry_countdown = retry_interval
    for x in range(min(seconds_to_shutdown, timeout)):
        return_infos.append(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        if retry_countdown == 0:
            return_shutdowns.append(shutdown_count.append("shutdown"))
            retry_countdown = retry_interval
        else:
            retry_countdown -= 1
    if seconds_to_shutdown < timeout:
        return_infos.append(
            (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
    mock_domain.info.side_effect = return_infos
    mock_domain.shutdown.side_effect = return_shutdowns
    def fake_create_domain(**kwargs):
        self.reboot_create_called = True
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_get_domain.return_value = mock_domain
    mock_create_domain.side_effect = fake_create_domain
    result = drvr._clean_shutdown(instance, timeout, retry_interval)
    self.assertEqual(succeeds, result)
    self.assertEqual(shutdown_attempts, len(shutdown_count))
def test_clean_shutdown_first_time(self):
    """Guest stops before the timeout: a single shutdown attempt succeeds."""
    kwargs = dict(seconds_to_shutdown=2, timeout=5, retry_interval=3,
                  shutdown_attempts=1, succeeds=True)
    self._test_clean_shutdown(**kwargs)
def test_clean_shutdown_with_retry(self):
    """Guest needs longer than one retry interval: two attempts, success."""
    kwargs = dict(seconds_to_shutdown=4, timeout=5, retry_interval=3,
                  shutdown_attempts=2, succeeds=True)
    self._test_clean_shutdown(**kwargs)
def test_clean_shutdown_failure(self):
    """Guest outlives the timeout: two attempts are made, result is failure."""
    kwargs = dict(seconds_to_shutdown=6, timeout=5, retry_interval=3,
                  shutdown_attempts=2, succeeds=False)
    self._test_clean_shutdown(**kwargs)
def test_clean_shutdown_no_wait(self):
    """Zero timeout: exactly one attempt is made and no waiting happens."""
    kwargs = dict(seconds_to_shutdown=6, timeout=0, retry_interval=3,
                  shutdown_attempts=1, succeeds=False)
    self._test_clean_shutdown(**kwargs)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_ID,
                            mock_attachDevice):
    """Direct (SR-IOV) VIFs in network_info get hot-attached to the guest."""
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    # Mark the VIF as a direct/SR-IOV port so the attach path is taken.
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    guest = libvirt_guest.Guest(FakeVirtDomain())
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._attach_sriov_ports(self.context, instance, guest, network_info)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports_with_info_cache(self,
                                            mock_get_image_metadata,
                                            mock_ID,
                                            mock_attachDevice):
    """With no network_info argument, the instance's info_cache is used."""
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)
    guest = libvirt_guest.Guest(FakeVirtDomain())
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Passing None forces the fallback to instance.info_cache.
    drvr._attach_sriov_ports(self.context, instance, guest, None)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_detach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_detachDeviceFlags,
                            mock_has_min_version):
    """Direct (SR-IOV) VIFs found in the info cache get hot-detached."""
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)
    domain = FakeVirtDomain()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest(domain)
    drvr._detach_sriov_ports(self.context, instance, guest)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_detachDeviceFlags.called)
def test_resume(self):
    """resume() recreates the domain from its saved XML, passing the
    blockinfo-derived disk_info, and re-attaches PCI devices.
    """
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest('fake_dom')
    with test.nested(
        mock.patch.object(drvr, '_get_existing_domain_xml',
                          return_value=dummyxml),
        mock.patch.object(drvr, '_create_domain_and_network',
                          return_value=guest),
        mock.patch.object(drvr, '_attach_pci_devices'),
        mock.patch.object(pci_manager, 'get_instance_pci_devs',
                          return_value='fake_pci_devs'),
        mock.patch.object(utils, 'get_image_from_system_metadata'),
        mock.patch.object(blockinfo, 'get_disk_info'),
    ) as (_get_existing_domain_xml, _create_domain_and_network,
          _attach_pci_devices, get_instance_pci_devs, get_image_metadata,
          get_disk_info):
        get_image_metadata.return_value = {'bar': 234}
        disk_info = {'foo': 123}
        get_disk_info.return_value = disk_info
        drvr.resume(self.context, instance, network_info,
                    block_device_info)
        _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                                   network_info,
                                                   block_device_info)])
        # The XML from the saved domain and the freshly computed
        # disk_info must both flow into the recreate call.
        _create_domain_and_network.assert_has_calls([mock.call(
                                    self.context, dummyxml,
                                    instance, network_info, disk_info,
                                    block_device_info=block_device_info,
                                    vifs_already_plugged=True)])
        _attach_pci_devices.assert_has_calls([mock.call(guest,
                                             'fake_pci_devs')])
@mock.patch.object(host.Host, 'get_domain')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
                           mock_get_info, mock_get_domain):
    """destroy() completes and persists the instance via save()."""
    dom_mock = mock.MagicMock()
    dom_mock.undefineFlags.return_value = 1
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.return_value = dom_mock
    # Report the guest already shut down so destroy() can proceed
    # straight to undefining the domain.
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.SHUTDOWN, id=-1)
    mock_delete_instance_files.return_value = None
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(rbd_utils, 'RBDDriver')
def test_cleanup_rbd(self, mock_driver):
    """_cleanup_rbd delegates to RBDDriver.cleanup_volumes."""
    fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
    rbd = mock_driver.return_value
    rbd.cleanup_volumes = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    drvr._cleanup_rbd(fake_instance)
    rbd.cleanup_volumes.assert_called_once_with(fake_instance)
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_undefine_flags(self, mock_save):
    """If undefineFlags() raises, destroy() falls back to plain undefine()."""
    # NOTE(review): this local deliberately shadows the mock module; kept
    # as-is because the mox record/replay call order below is significant.
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err'))
    mock.ID().AndReturn(123)
    mock.undefine()
    self.mox.ReplayAll()
    def fake_get_domain(instance):
        return mock
    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
    def fake_delete_instance_files(instance):
        return None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
    """Old libvirt without undefineFlags: a managed-save image is removed
    before the fallback undefine().
    """
    # NOTE(review): local shadows the mock module; mox record order matters.
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndReturn(True)
    mock.managedSaveRemove(0)
    mock.undefine()
    self.mox.ReplayAll()
    def fake_get_domain(instance):
        return mock
    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
    def fake_delete_instance_files(instance):
        return None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
    """Old libvirt with neither undefineFlags nor managed-save support:
    destroy() still completes via plain undefine().
    """
    # NOTE(review): local shadows the mock module; mox record order matters.
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndRaise(AttributeError())
    mock.undefine()
    self.mox.ReplayAll()
    # Stubbed on the Host class, hence the extra self parameter.
    def fake_get_domain(self, instance):
        return mock
    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN)
    def fake_delete_instance_files(instance):
        return None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
def test_destroy_timed_out(self):
    """A libvirt operation-timeout error surfaces as
    InstancePowerOffFailure from destroy().
    """
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out"))
    self.mox.ReplayAll()
    def fake_get_domain(self, instance):
        return mock
    def fake_get_error_code(self):
        return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code',
                   fake_get_error_code)
    instance = objects.Instance(**self.test_instance)
    self.assertRaises(exception.InstancePowerOffFailure,
                      drvr.destroy, self.context, instance, [])
def test_private_destroy_not_found(self):
    """_destroy tolerates the domain vanishing mid-operation."""
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain",
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    # Both destroy() and the follow-up info() report the domain gone.
    mock.destroy().AndRaise(ex)
    mock.info().AndRaise(ex)
    mock.UUIDString()
    self.mox.ReplayAll()
    def fake_get_domain(instance):
        return mock
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    instance = objects.Instance(**self.test_instance)
    # NOTE(vish): verifies destroy doesn't raise if the instance disappears
    drvr._destroy(instance)
def test_private_destroy_lxc_processes_refused_to_die(self):
    """For LXC, 'processes refused to die' is tolerated when the guest
    is already reported SHUTDOWN.
    """
    self.flags(virt_type='lxc', group='libvirt')
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \
         mock.patch.object(conn, 'get_info') as mock_get_info:
        mock_domain = mock.MagicMock()
        mock_domain.ID.return_value = 1
        mock_get_domain.return_value = mock_domain
        mock_domain.destroy.side_effect = ex
        # Despite the destroy error, get_info says the guest is down,
        # so _destroy should swallow the exception.
        mock_info = mock.MagicMock()
        mock_info.id = 1
        mock_info.state = power_state.SHUTDOWN
        mock_get_info.return_value = mock_info
        instance = objects.Instance(**self.test_instance)
        conn._destroy(instance)
def test_private_destroy_processes_refused_to_die_still_raises(self):
    """Outside the LXC special case, 'processes refused to die' is
    re-raised to the caller.
    """
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(conn._host, 'get_domain') as mock_get_domain:
        mock_domain = mock.MagicMock()
        mock_domain.ID.return_value = 1
        mock_get_domain.return_value = mock_domain
        mock_domain.destroy.side_effect = ex
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
                          instance)
def test_private_destroy_ebusy_timeout(self):
    # Tests that _destroy will retry 3 times to destroy the guest when an
    # EBUSY is raised, but eventually times out and raises the libvirtError
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)
    # Every poweroff attempt raises EBUSY.
    mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
    mock_guest.poweroff = mock.Mock(side_effect=ex)
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr._host, 'get_guest',
                           return_value=mock_guest):
        self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
                          instance)
    # The retry budget is exactly three attempts.
    self.assertEqual(3, mock_guest.poweroff.call_count)
def test_private_destroy_ebusy_multiple_attempt_ok(self):
    # Tests that the _destroy attempt loop is broken when EBUSY is no
    # longer raised.
    ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)
    # First attempt raises EBUSY, second succeeds.
    mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
    mock_guest.poweroff = mock.Mock(side_effect=[ex, None])
    inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1)
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr._host, 'get_guest',
                           return_value=mock_guest):
        with mock.patch.object(drvr, 'get_info', return_value=inst_info):
            drvr._destroy(instance)
    self.assertEqual(2, mock_guest.poweroff.call_count)
def test_undefine_domain_with_not_found_instance(self):
    """_undefine_domain is a no-op when the domain no longer exists."""
    def fake_get_domain(self, instance):
        raise exception.InstanceNotFound(instance_id=instance.uuid)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code")
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    # NOTE(wenjianhn): verifies undefine doesn't raise if the
    # instance disappears
    drvr._undefine_domain(instance)
@mock.patch.object(host.Host, "list_instance_domains")
def test_disk_over_committed_size_total(self, mock_list):
    """Over-committed disk sizes are summed across all listed domains.

    NOTE(review): the original inline comment here ("Ensure destroy calls
    managedSaveRemove...") was copy-pasted from an unrelated test.
    """
    class DiagFakeDomain(object):
        def __init__(self, name):
            self._name = name
        def ID(self):
            return 1
        def name(self):
            return self._name
        def UUIDString(self):
            return "19479fee-07a5-49bb-9138-d3738280d63c"
        def XMLDesc(self, flags):
            return "<domain/>"
    mock_list.return_value = [
        DiagFakeDomain("instance0000001"),
        DiagFakeDomain("instance0000002")]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    fake_disks = {'instance0000001':
                  [{'type': 'qcow2', 'path': '/somepath/disk1',
                    'virt_disk_size': '10737418240',
                    'backing_file': '/somepath/disk1',
                    'disk_size': '83886080',
                    'over_committed_disk_size': '10653532160'}],
                  'instance0000002':
                  [{'type': 'raw', 'path': '/somepath/disk2',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk2',
                    'disk_size': '10737418240',
                    'over_committed_disk_size': '0'}]}
    def get_info(instance_name, xml, **kwargs):
        return fake_disks.get(instance_name)
    with mock.patch.object(drvr,
                           "_get_instance_disk_info") as mock_info:
        mock_info.side_effect = get_info
        result = drvr._get_disk_over_committed_size_total()
        # 10653532160 + 0: only instance0000001 over-commits.
        self.assertEqual(result, 10653532160)
        mock_list.assert_called_with()
        self.assertTrue(mock_info.called)
@mock.patch.object(host.Host, "list_instance_domains")
def test_disk_over_committed_size_total_eperm(self, mock_list):
    """A permission error probing one domain's disks is skipped; the
    remaining domains still contribute to the total.

    NOTE(review): the original inline comment here ("Ensure destroy calls
    managedSaveRemove...") was copy-pasted from an unrelated test.
    """
    class DiagFakeDomain(object):
        def __init__(self, name):
            self._name = name
        def ID(self):
            return 1
        def name(self):
            return self._name
        def UUIDString(self):
            return "19479fee-07a5-49bb-9138-d3738280d63c"
        def XMLDesc(self, flags):
            return "<domain/>"
    mock_list.return_value = [
        DiagFakeDomain("instance0000001"),
        DiagFakeDomain("instance0000002")]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    fake_disks = {'instance0000001':
                  [{'type': 'qcow2', 'path': '/somepath/disk1',
                    'virt_disk_size': '10737418240',
                    'backing_file': '/somepath/disk1',
                    'disk_size': '83886080',
                    'over_committed_disk_size': '10653532160'}],
                  'instance0000002':
                  [{'type': 'raw', 'path': '/somepath/disk2',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk2',
                    'disk_size': '10737418240',
                    'over_committed_disk_size': '21474836480'}]}
    def side_effect(name, dom):
        # instance0000001 is unreadable and must be silently skipped.
        if name == 'instance0000001':
            raise OSError(errno.EACCES, 'Permission denied')
        if name == 'instance0000002':
            return fake_disks.get(name)
    get_disk_info = mock.Mock()
    get_disk_info.side_effect = side_effect
    drvr._get_instance_disk_info = get_disk_info
    result = drvr._get_disk_over_committed_size_total()
    # Only instance0000002's over-commit is counted.
    self.assertEqual(21474836480, result)
    mock_list.assert_called_with()
@mock.patch.object(host.Host, "list_instance_domains",
                   return_value=[mock.MagicMock(name='foo')])
@mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
                   side_effect=exception.VolumeBDMPathNotFound(path='bar'))
def test_disk_over_committed_size_total_bdm_not_found(self,
                                                      mock_get_disk_info,
                                                      mock_list_domains):
    """A missing volume BDM path yields 0 rather than an exception."""
    # Tests that we handle VolumeBDMPathNotFound gracefully.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(0, drvr._get_disk_over_committed_size_total())
def test_cpu_info(self):
    """_get_cpu_info reflects the host capabilities' CPU model, vendor,
    features, arch and topology.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    def get_host_capabilities_stub(self):
        cpu = vconfig.LibvirtConfigCPU()
        cpu.model = "Opteron_G4"
        cpu.vendor = "AMD"
        cpu.arch = arch.X86_64
        cpu.cells = 1
        cpu.cores = 2
        cpu.threads = 1
        cpu.sockets = 4
        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu
        # Two guest arches advertised; they don't affect the CPU info.
        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = vm_mode.HVM
        guest.arch = arch.X86_64
        guest.domtype = ["kvm"]
        caps.guests.append(guest)
        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = vm_mode.HVM
        guest.arch = arch.I686
        guest.domtype = ["kvm"]
        caps.guests.append(guest)
        return caps
    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)
    want = {"vendor": "AMD",
            "features": set(["extapic", "3dnow"]),
            "model": "Opteron_G4",
            "arch": arch.X86_64,
            "topology": {"cells": 1, "cores": 2, "threads": 1,
                         "sockets": 4}}
    got = drvr._get_cpu_info()
    self.assertEqual(want, got)
def test_get_pcidev_info(self):
    """_get_pcidev_info classifies PFs vs VFs and reports the parent
    physical function and NUMA node for VF devices.
    """
    def fake_nodeDeviceLookupByName(self, name):
        return FakeNodeDevice(_fake_NodeDevXml[name])
    self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
    host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # A physical function: no phys_function key, no NUMA node in its XML.
    actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
    expect_vf = {
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:00.3",
        "product_id": '1521',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1521',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        }
    self.assertEqual(expect_vf, actualvf)
    # A virtual function whose XML lacks NUMA placement.
    actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
    expect_vf = {
        "dev_id": "pci_0000_04_10_7",
        "address": "0000:04:10.7",
        "product_id": '1520',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1520',
        "dev_type": fields.PciDeviceType.SRIOV_VF,
        "phys_function": '0000:04:00.3',
        }
    self.assertEqual(expect_vf, actualvf)
    # A virtual function with NUMA node 0 in its XML.
    actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
    expect_vf = {
        "dev_id": "pci_0000_04_11_7",
        "address": "0000:04:11.7",
        "product_id": '1520',
        "vendor_id": '8086',
        "numa_node": 0,
        "label": 'label_8086_1520',
        "dev_type": fields.PciDeviceType.SRIOV_VF,
        "phys_function": '0000:04:00.3',
        }
    self.assertEqual(expect_vf, actualvf)
def test_list_devices_not_supported(self):
    """VIR_ERR_NO_SUPPORT from listDevices yields an empty device list;
    any other libvirt error propagates.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Handle just the NO_SUPPORT error
    not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=not_supported_exc):
        self.assertEqual('[]', drvr._get_pci_passthrough_devices())
    # We cache not supported status to avoid emitting too many logging
    # messages. Clear this value to test the other exception case.
    del drvr._list_devices_supported
    # Other errors should not be caught
    other_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'other exc',
        error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=other_exc):
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._get_pci_passthrough_devices)
def test_get_pci_passthrough_devices(self):
    """_get_pci_passthrough_devices returns JSON describing each host PCI
    device (PFs and VFs) built from the node-device XML.

    Improvements over the original: the comparison loop uses zip/items
    instead of the range(len(...)) index anti-idiom, and the device
    counts are asserted explicitly first so a mismatch fails with a clear
    message rather than an IndexError.
    """
    def fakelistDevices(caps, fakeargs=0):
        return ['pci_0000_04_00_3', 'pci_0000_04_10_7',
                'pci_0000_04_11_7']
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
    def fake_nodeDeviceLookupByName(self, name):
        return FakeNodeDevice(_fake_NodeDevXml[name])
    self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
    host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    actjson = drvr._get_pci_passthrough_devices()
    expectvfs = [
        {
            "dev_id": "pci_0000_04_00_3",
            "address": "0000:04:00.3",
            "product_id": '1521',
            "vendor_id": '8086',
            "dev_type": fields.PciDeviceType.SRIOV_PF,
            "phys_function": None,
            "numa_node": None},
        {
            "dev_id": "pci_0000_04_10_7",
            "domain": 0,
            "address": "0000:04:10.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": None,
            "dev_type": fields.PciDeviceType.SRIOV_VF,
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')]},
        {
            "dev_id": "pci_0000_04_11_7",
            "domain": 0,
            "address": "0000:04:11.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": 0,
            "dev_type": fields.PciDeviceType.SRIOV_VF,
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
        }
    ]
    actualvfs = jsonutils.loads(actjson)
    # Fail clearly if the driver reported a different number of devices.
    self.assertEqual(len(expectvfs), len(actualvfs))
    for expect, actual in zip(expectvfs, actualvfs):
        for key, value in actual.items():
            # phys_function/virt_functions/label encodings differ between
            # the expectation format and the JSON payload; skip them.
            if key not in ('phys_function', 'virt_functions', 'label'):
                self.assertEqual(expect[key], value)
def _fake_caps_numa_topology(self,
                             cells_per_host=4,
                             sockets_per_cell=1,
                             cores_per_socket=1,
                             threads_per_core=2,
                             kb_mem=1048576):
    """Build a fake libvirt capabilities NUMA topology.

    Each cell gets two mempage entries: 4K pages (1024 * cell index
    total) and 2M pages (cell index total).
    """
    cell_mempages = []
    for cellid in range(cells_per_host):
        small_pages = vconfig.LibvirtConfigCapsNUMAPages()
        small_pages.size = 4
        small_pages.total = 1024 * cellid
        large_pages = vconfig.LibvirtConfigCapsNUMAPages()
        large_pages.size = 2048
        large_pages.total = cellid
        cell_mempages.append([small_pages, large_pages])
    return fakelibvirt.HostInfo._gen_numa_topology(
        cells_per_host,
        sockets_per_cell,
        cores_per_socket,
        threads_per_core,
        kb_mem=kb_mem,
        numa_mempages_list=cell_mempages)
def _test_get_host_numa_topology(self, mempages):
    """Common body for the host-NUMA-topology tests.

    Builds fake capabilities, restricts usable CPUs via the vcpu pin set
    intersected with the online CPUs, then checks the resulting topology
    dict, per-cell mempages (when *mempages* is True), pinning and
    sibling sets.
    """
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = arch.X86_64
    caps.host.topology = self._fake_caps_numa_topology()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Usable cpus per cell = pin set {0,1,3,4,5} ∩ online {0,1,2,3,6}:
    # cell0 keeps 0,1; cell1 keeps 3; cells 2 and 3 keep nothing.
    expected_topo_dict = {'cells': [
                              {'cpus': '0,1', 'cpu_usage': 0,
                                'mem': {'total': 256, 'used': 0},
                                'id': 0},
                               {'cpus': '3', 'cpu_usage': 0,
                                'mem': {'total': 256, 'used': 0},
                                'id': 1},
                               {'cpus': '', 'cpu_usage': 0,
                                'mem': {'total': 256, 'used': 0},
                                'id': 2},
                               {'cpus': '', 'cpu_usage': 0,
                                'mem': {'total': 256, 'used': 0},
                                'id': 3}]}
    with test.nested(
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set',
                return_value=set([0, 1, 3, 4, 5])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set([0, 1, 2, 3, 6])),
            ):
        got_topo = drvr._get_host_numa_topology()
        got_topo_dict = got_topo._to_dict()
        self.assertThat(
                expected_topo_dict, matchers.DictMatches(got_topo_dict))
        if mempages:
            # cells 0
            self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
            self.assertEqual(0, got_topo.cells[0].mempages[0].total)
            self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
            self.assertEqual(0, got_topo.cells[0].mempages[1].total)
            # cells 1
            self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
            self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
            self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
            self.assertEqual(1, got_topo.cells[1].mempages[1].total)
        else:
            self.assertEqual([], got_topo.cells[0].mempages)
            self.assertEqual([], got_topo.cells[1].mempages)
        self.assertEqual(expected_topo_dict, got_topo_dict)
        self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
        # Only cell0 has a full thread-sibling pair surviving the filter.
        self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
        self.assertEqual([], got_topo.cells[1].siblings)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology(self, mock_version):
    """With new-enough libvirt, topology includes mempage information."""
    self._test_get_host_numa_topology(mempages=True)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_no_mempages(self, mock_lib_version,
                                            mock_version, mock_type):
    """Below the hugepage-capable libvirt version, mempages are omitted."""
    self.flags(virt_type='kvm', group='libvirt')
    # One below the minimum libvirt hugepage version.
    mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1
    mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_type.return_value = host.HV_DRIVER_QEMU
    self._test_get_host_numa_topology(mempages=False)
def test_get_host_numa_topology_empty(self):
    """No host topology in the capabilities -> None is returned."""
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = arch.X86_64
    caps.host.topology = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with test.nested(
        mock.patch.object(host.Host, 'has_min_version', return_value=True),
        mock.patch.object(host.Host, "get_capabilities",
                          return_value=caps)
    ) as (has_min_version, get_caps):
        self.assertIsNone(drvr._get_host_numa_topology())
    # Capabilities are fetched twice on this path.
    self.assertEqual(2, get_caps.call_count)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_old_version(self, mock_lib_version,
                                            mock_version, mock_type):
    """Pre-NUMA libvirt versions report no host NUMA topology at all."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # One below the minimum libvirt needed for NUMA support.
    too_old = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1
    qemu_ok = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_lib_version.return_value = too_old
    mock_version.return_value = qemu_ok
    mock_type.return_value = host.HV_DRIVER_QEMU

    self.assertIsNone(drvr._get_host_numa_topology())
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_xen(self, mock_lib_version,
                                    mock_version, mock_type):
    """The xen hypervisor never reports a host NUMA topology."""
    self.flags(virt_type='xen', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # Versions are new enough; it is the xen driver type alone that
    # disables NUMA reporting here.
    lib_ok = utils.convert_version_to_int(
        libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
    qemu_ok = utils.convert_version_to_int(
        libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_lib_version.return_value = lib_ok
    mock_version.return_value = qemu_ok
    mock_type.return_value = host.HV_DRIVER_XEN

    self.assertIsNone(drvr._get_host_numa_topology())
def test_diagnostic_vcpus_exception(self):
    """Diagnostics survive a failing vcpus() call.

    The fake domain raises libvirtError from vcpus(), so the legacy
    dict must simply omit the cpuN_time keys and the versioned
    diagnostics must report an empty cpu_details list, while disk,
    interface and memory statistics are still collected.
    """
    # Two disks and one interface so block and interface statistics
    # are exercised alongside the failing vcpu lookup.
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            # Simulate libvirt failing to report vcpu information,
            # e.g. while the domain is starting up or shutting down.
            raise fakelibvirt.libvirtError('vcpus missing')

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Note: no cpuN_time keys — the vcpus() failure is swallowed.
    expect = {'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # Freeze "now" ten seconds after launch so uptime == 10, and
    # restore real time when the test ends so the global override
    # cannot leak into other tests.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    self.addCleanup(timeutils.clear_time_override)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_blockstats_exception(self):
    """Diagnostics survive a failing blockStats() call.

    blockStats() raises libvirtError, so the legacy dict omits all
    per-disk keys and the versioned diagnostics report an empty
    disk_details list; cpu, interface and memory statistics are
    still collected.
    """
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            # (per-vcpu info tuples, per-vcpu CPU maps) for 4 vcpus,
            # mirroring libvirt's virDomainGetVcpus return shape.
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            raise fakelibvirt.libvirtError('blockStats missing')

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Note: no vda_*/vdb_* keys — the blockStats() failure is
    # swallowed.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # Freeze "now" ten seconds after launch so uptime == 10, and
    # restore real time on cleanup so the global override cannot
    # leak into other tests.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    self.addCleanup(timeutils.clear_time_override)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_interfacestats_exception(self):
    """Diagnostics survive a failing interfaceStats() call.

    interfaceStats() raises libvirtError, so the legacy dict omits
    all vnet0_* keys and the versioned diagnostics report an empty
    nic_details list; cpu, disk and memory statistics are still
    collected.
    """
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            # (per-vcpu info tuples, per-vcpu CPU maps) for 4 vcpus.
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            raise fakelibvirt.libvirtError('interfaceStat missing')

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Note: no vnet0_* keys — the interfaceStats() failure is
    # swallowed.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              }
    self.assertEqual(actual, expect)

    # Freeze "now" ten seconds after launch so uptime == 10, and
    # restore real time on cleanup so the global override cannot
    # leak into other tests.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    self.addCleanup(timeutils.clear_time_override)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_memorystats_exception(self):
    """Diagnostics survive a failing memoryStats() call.

    memoryStats() raises libvirtError, so the legacy dict keeps only
    the 'memory' maximum (from maxMemory()) and drops memory-actual/
    memory-rss; cpu, disk and interface statistics are unaffected.
    """
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            # (per-vcpu info tuples, per-vcpu CPU maps) for 4 vcpus.
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            raise fakelibvirt.libvirtError('memoryStats missing')

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Note: 'memory' survives (maxMemory() succeeded) but
    # memory-actual/memory-rss are absent.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # Freeze "now" ten seconds after launch so uptime == 10, and
    # restore real time on cleanup so the global override cannot
    # leak into other tests.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    self.addCleanup(timeutils.clear_time_override)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_full(self):
    """Happy path: every libvirt stats call succeeds.

    Both the legacy dict and the versioned diagnostics object must
    contain cpu, disk, interface and memory statistics.
    """
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            # (per-vcpu info tuples, per-vcpu CPU maps) for 4 vcpus.
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # Freeze "now" ten seconds after launch so uptime == 10, and
    # restore real time on cleanup so the global override cannot
    # leak into other tests.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    self.addCleanup(timeutils.clear_time_override)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    # memory_details (2048/1234) apparently come from the test
    # instance's flavor rather than the fake domain — TODO confirm
    # against self.test_instance.
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
@mock.patch.object(timeutils, 'utcnow')
@mock.patch.object(host.Host, 'get_domain')
def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain,
                                                  mock_utcnow):
    """Per-interface stats are reported for every guest interface.

    The domain XML declares two interfaces (vnet0 and br0); both must
    show up in the legacy dict (prefixed keys) and in the versioned
    diagnostics' nic_details list.
    """
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                    <interface type="bridge">
                        <mac address="53:55:00:a5:39:39"/>
                        <model type="virtio"/>
                        <target dev="br0"/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            # (per-vcpu info tuples, per-vcpu CPU maps) for 4 vcpus.
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    # Used as a mock side_effect: get_domain(instance) binds the
    # instance argument to this function's single parameter.
    def fake_get_domain(self):
        return DiagFakeDomain()

    mock_get_domain.side_effect = fake_get_domain
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              'br0_rx': 4408,
              'br0_rx_drop': 0,
              'br0_rx_errors': 0,
              'br0_rx_packets': 82,
              'br0_tx': 0,
              'br0_tx_drop': 0,
              'br0_tx_errors': 0,
              'br0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # Mock "now" to ten seconds after launch so uptime == 10.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    mock_utcnow.return_value = diags_time
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0},
                                {'mac_address': '53:55:00:a5:39:39',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                # uptime is a float here (timedelta.total_seconds()).
                'uptime': 10.,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count(self, mock_list):
    """Domain can fail to return the vcpu description in case it's
    just starting up or shutting down. Make sure None is handled
    gracefully.
    """
    class FailableDomain(object):
        def __init__(self, vcpus):
            self._vcpus = vcpus

        def vcpus(self):
            if self._vcpus is None:
                raise fakelibvirt.libvirtError("fake-error")
            return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus)

        def ID(self):
            return 1

        def name(self):
            return "instance000001"

        def UUIDString(self):
            return "19479fee-07a5-49bb-9138-d3738280d63c"

    broken_dom = FailableDomain(None)
    healthy_dom = FailableDomain(5)
    mock_list.return_value = [broken_dom, healthy_dom]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Only the healthy domain contributes to the used-vcpu count.
    self.assertEqual(5, drvr._get_vcpu_used())
    mock_list.assert_called_with()
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count_none(self, mock_list):
    """Domain will return zero if the current number of vcpus used
    is None. This is in case of VM state starting up or shutting
    down. None type returned is counted as zero.
    """
    class NoneVcpuDomain(object):
        def vcpus(self):
            return None

        def ID(self):
            return 1

        def name(self):
            return "instance000001"

    mock_list.return_value = [NoneVcpuDomain()]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(0, drvr._get_vcpu_used())
    mock_list.assert_called_with()
def test_get_instance_capabilities(self):
    """Guest capabilities flatten to (arch, domtype, ostype) tuples."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    def get_host_capabilities_stub(self):
        # x86_64 supports kvm+qemu; i686 only kvm — same guests, in
        # the same order, as the expected flattened list below.
        caps = vconfig.LibvirtConfigCaps()
        for guest_arch, domtypes in ((arch.X86_64, ['kvm', 'qemu']),
                                     (arch.I686, ['kvm'])):
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = 'hvm'
            guest.arch = guest_arch
            guest.domtype = domtypes
            caps.guests.append(guest)
        return caps

    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)

    want = [(arch.X86_64, 'kvm', 'hvm'),
            (arch.X86_64, 'qemu', 'hvm'),
            (arch.I686, 'kvm', 'hvm')]
    got = drvr._get_instance_capabilities()
    self.assertEqual(want, got)
def test_set_cache_mode(self):
    """A matching disk_cachemodes entry is applied to the disk config."""
    self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_conf = FakeConfigGuestDisk()
    disk_conf.source_type = 'file'

    drvr._set_cache_mode(disk_conf)
    self.assertEqual('directsync', disk_conf.driver_cache)
def test_set_cache_mode_invalid_mode(self):
    """An unknown cache mode in disk_cachemodes is silently ignored."""
    self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_conf = FakeConfigGuestDisk()
    disk_conf.source_type = 'file'

    drvr._set_cache_mode(disk_conf)
    self.assertIsNone(disk_conf.driver_cache)
def test_set_cache_mode_invalid_object(self):
    """Configs without a source type keep their existing driver_cache."""
    self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    guest_conf = FakeConfigGuest()
    guest_conf.driver_cache = 'fake'

    drvr._set_cache_mode(guest_conf)
    self.assertEqual('fake', guest_conf.driver_cache)
@mock.patch('os.unlink')
@mock.patch.object(os.path, 'exists')
def _test_shared_storage_detection(self, is_same,
                                   mock_exists, mock_unlink):
    """Drive _is_storage_shared_with with a faked remote probe file.

    The driver creates a file on the remote host and checks whether
    it shows up locally; ``is_same`` controls the faked visibility.
    Returns whatever _is_storage_shared_with returned.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
    mock_exists.return_value = is_same
    with test.nested(
        mock.patch.object(drvr._remotefs, 'create_file'),
        mock.patch.object(drvr._remotefs, 'remove_file')
    ) as (mock_create, mock_remove):
        result = drvr._is_storage_shared_with('host', '/path')

    # The probe file must be created under the shared path.
    mock_create.assert_any_call('host', mock.ANY)
    create_args, _create_kwargs = mock_create.call_args
    self.assertTrue(create_args[1].startswith('/path'))
    if is_same:
        # Shared: the probe is visible locally and unlinked locally.
        mock_unlink.assert_called_once_with(mock.ANY)
    else:
        # Not shared: the probe is removed on the remote host.
        mock_remove.assert_called_with('host', mock.ANY)
        remove_args, _remove_kwargs = mock_remove.call_args
        self.assertTrue(remove_args[1].startswith('/path'))
    return result
def test_shared_storage_detection_same_host(self):
    """Probe file visible locally -> storage is shared."""
    shared = self._test_shared_storage_detection(True)
    self.assertTrue(shared)
def test_shared_storage_detection_different_host(self):
    """Probe file not visible locally -> storage is not shared."""
    shared = self._test_shared_storage_detection(False)
    self.assertFalse(shared)
def test_shared_storage_detection_easy(self):
    """Same-host check short-circuits before touching the filesystem."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
    self.mox.StubOutWithMock(utils, 'execute')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(os, 'unlink')
    # Only get_host_ip_addr has a recorded expectation; execute,
    # exists and unlink are stubbed with none, so any call to them
    # would fail the replay.
    drvr.get_host_ip_addr().AndReturn('foo')
    self.mox.ReplayAll()
    self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))
def test_store_pid_remove_pid(self):
    """Jobs can be tracked per instance and dropped again."""
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    fake_popen = mock.Mock(pid=3)

    drvr.job_tracker.add_job(instance, fake_popen.pid)
    self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])

    # Removing the last job drops the instance entry entirely.
    drvr.job_tracker.remove_job(instance, fake_popen.pid)
    self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_get_domain_info_with_more_return(self, mock_get_domain):
    """Extra trailing fields in the domain info list are tolerated."""
    instance = objects.Instance(**self.test_instance)
    fake_dom = mock.MagicMock()
    # state, maxMem, memory, nrVirtCpu, cpuTime + one surplus element.
    fake_dom.info.return_value = [
        1, 2048, 737, 8, 12345, 888888
    ]
    fake_dom.ID.return_value = mock.sentinel.instance_id
    mock_get_domain.return_value = fake_dom
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    info = drvr.get_info(instance)

    self.assertEqual(1, info.state)
    self.assertEqual(2048, info.max_mem_kb)
    self.assertEqual(737, info.mem_kb)
    self.assertEqual(8, info.num_cpu)
    self.assertEqual(12345, info.cpu_time_ns)
    self.assertEqual(mock.sentinel.instance_id, info.id)
    fake_dom.info.assert_called_once_with()
    fake_dom.ID.assert_called_once_with()
    mock_get_domain.assert_called_once_with(instance)
def test_create_domain(self):
    """_create_domain powers on the domain and wraps it in a Guest."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    fake_domain = mock.MagicMock()

    guest = drvr._create_domain(domain=fake_domain)

    self.assertEqual(fake_domain, guest._domain)
    # Power-on goes through createWithFlags with no flags set.
    fake_domain.createWithFlags.assert_has_calls([mock.call(0)])
@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                           mock_setup_container, mock_get_info, mock_clean):
    """LXC boot wires up the container rootfs and cleans the namespace.

    With the guest reported RUNNING, the rootfs device returned by
    setup_container is recorded in the instance's system_metadata and
    clean_lxc_namespace is called for the rootfs directory.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    # setup_container reports the block device backing the rootfs.
    mock_setup_container.return_value = '/dev/nbd0'
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.RUNNING)

    with test.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, '_is_booted_from_volume',
                          return_value=False),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])

    setup_container_call = mock.call(
        mock_image.get_model(),
        container_dir='/tmp/rootfs')
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    # RUNNING guest -> the namespace is cleaned, not torn down.
    mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                   mock_ensure_tree, mock_setup_container,
                                   mock_chown, mock_get_info, mock_clean):
    """LXC boot with uid/gid maps chowns the rootfs accordingly.

    Configured uid_maps/gid_maps must be turned into UID/GID map
    config objects and passed to chown_for_id_maps for the container
    rootfs; everything else behaves as in the plain LXC boot test.
    """
    self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
               gid_maps=["0:1000:100"], group='libvirt')

    def chown_side_effect(path, id_maps):
        # Verify the maps match the "0:1000:100" flags: a UID map
        # first, then a GID map, each start=0 target=1000 count=100.
        self.assertEqual('/tmp/rootfs', path)
        self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
        self.assertEqual(0, id_maps[0].start)
        self.assertEqual(1000, id_maps[0].target)
        self.assertEqual(100, id_maps[0].count)
        self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
        self.assertEqual(0, id_maps[1].start)
        self.assertEqual(1000, id_maps[1].target)
        self.assertEqual(100, id_maps[1].count)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    mock_chown.side_effect = chown_side_effect
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.RUNNING)

    with test.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, '_is_booted_from_volume',
                          return_value=False),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
    ) as (
        mock_create_images_and_backing, mock_is_booted_from_volume,
        mock_create_domain, mock_plug_vifs, mock_setup_basic_filtering,
        mock_prepare_instance_filter, mock_apply_instance_filter
    ):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_is_booted_from_volume.assert_called_once_with(mock_instance, {})
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])

    setup_container_call = mock.call(
        mock_image.get_model(),
        container_dir='/tmp/rootfs')
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
@mock.patch('nova.virt.disk.api.teardown_container')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                       mock_ensure_tree,
                                       mock_setup_container,
                                       mock_get_info, mock_teardown):
    """A container that fails to reach RUNNING is torn down.

    Identical setup to the plain LXC boot test, but get_info reports
    SHUTDOWN, so teardown_container must be called instead of
    clean_lxc_namespace.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    # The guest never comes up: container boot is considered failed.
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.SHUTDOWN)

    with test.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, '_is_booted_from_volume',
                          return_value=False),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])

    setup_container_call = mock.call(
        mock_image.get_model(),
        container_dir='/tmp/rootfs')
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    teardown_call = mock.call(container_dir='/tmp/rootfs')
    mock_teardown.assert_has_calls([teardown_call])
def test_create_domain_define_xml_fails(self):
    """Tests that the xml is logged when defining the domain fails."""
    fake_xml = "<test>this is a test</test>"

    def fake_defineXML(xml):
        # The driver must pass the caller's xml through unchanged.
        self.assertEqual(fake_xml, xml)
        raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

    def fake_safe_decode(text, *args, **kwargs):
        # Marker suffix proves the error path safe-decodes the xml
        # before logging it.
        return text + 'safe decoded'

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        self.assertIn(fake_xml, msg % args)
        self.assertIn('safe decoded', msg % args)

    self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock(defineXML=fake_defineXML)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # The libvirtError must propagate after being logged.
    self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                      fake_xml)
    self.assertTrue(self.log_error_called)
def test_create_domain_with_flags_fails(self):
    """Tests that the xml is logged when creating the domain with flags
    fails
    """
    fake_xml = "<test>this is a test</test>"
    fake_domain = FakeVirtDomain(fake_xml)

    def fake_createWithFlags(launch_flags):
        raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        # The failing domain's xml must appear in the logged message.
        self.assertIn(fake_xml, msg % args)

    self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock()
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # The libvirtError must propagate after being logged.
    self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                      domain=fake_domain)
    self.assertTrue(self.log_error_called)
    def test_create_domain_enable_hairpin_fails(self):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_execute(*args, **kwargs):
            # Simulate the hairpin-mode command failing.
            raise processutils.ProcessExecutionError('error')

        def fake_get_interfaces(*args):
            return ["dev"]

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            # The failing domain's XML must appear in the error log.
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(nova.utils, 'execute', fake_execute)
        self.stubs.Set(
            nova.virt.libvirt.guest.Guest, 'get_interfaces',
            fake_get_interfaces)

        # power_on=False exercises the hairpin path without launching.
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr._create_domain,
                          domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)
    def test_get_vnc_console(self):
        """A <graphics type='vnc'> element in the domain XML yields its
        port in the returned console object.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='vnc' port='5900'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            # Only answer lookups for the instance under test.
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        vnc_dict = drvr.get_vnc_console(self.context, instance)
        self.assertEqual(vnc_dict.port, '5900')
    def test_get_vnc_console_unavailable(self):
        """No graphics device in the domain XML raises
        ConsoleTypeUnavailable.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_vnc_console, self.context, instance)
    def test_get_spice_console(self):
        """A <graphics type='spice'> element in the domain XML yields its
        port in the returned console object.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='spice' port='5950'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            # Only answer lookups for the instance under test.
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        spice_dict = drvr.get_spice_console(self.context, instance)
        self.assertEqual(spice_dict.port, '5950')
    def test_get_spice_console_unavailable(self):
        """No graphics device in the domain XML raises
        ConsoleTypeUnavailable.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_spice_console, self.context, instance)
def test_detach_volume_with_instance_not_found(self):
# Test that detach_volume() method does not raise exception,
# if the instance does not exist.
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(host.Host, 'get_domain',
side_effect=exception.InstanceNotFound(
instance_id=instance.uuid)),
mock.patch.object(drvr, '_disconnect_volume')
) as (_get_domain, _disconnect_volume):
connection_info = {'driver_volume_type': 'fake'}
drvr.detach_volume(connection_info, instance, '/dev/sda')
_get_domain.assert_called_once_with(instance)
_disconnect_volume.assert_called_once_with(connection_info,
'sda')
    def _test_attach_detach_interface_get_config(self, method_name):
        """Tests that the get_config() method is properly called in
        attach_interface() and detach_interface().

        method_name: either \"attach_interface\" or \"detach_interface\"
                     depending on the method to test.
        """
        self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        fake_image_meta = {'id': instance['image_ref']}
        fake_image_meta_obj = objects.ImageMeta.from_dict(
            fake_image_meta)

        if method_name == "attach_interface":
            # attach_interface() additionally sets up basic filtering;
            # record that expectation before replay.
            self.mox.StubOutWithMock(drvr.firewall_driver,
                                     'setup_basic_filtering')
            drvr.firewall_driver.setup_basic_filtering(instance, network_info)

        # Compute the expected vif config with the real implementation
        # before stubbing get_config() itself.
        expected = drvr.vif_driver.get_config(instance, network_info[0],
                                              fake_image_meta_obj,
                                              instance.get_flavor(),
                                              CONF.libvirt.virt_type,
                                              drvr._host)
        self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
        drvr.vif_driver.get_config(instance, network_info[0],
                                   mox.IsA(objects.ImageMeta),
                                   mox.IsA(objects.Flavor),
                                   CONF.libvirt.virt_type,
                                   drvr._host).\
            AndReturn(expected)

        self.mox.ReplayAll()

        if method_name == "attach_interface":
            drvr.attach_interface(instance, fake_image_meta,
                                  network_info[0])
        elif method_name == "detach_interface":
            drvr.detach_interface(instance, network_info[0])
        else:
            raise ValueError("Unhandled method %s" % method_name)
@mock.patch.object(lockutils, "external_lock")
def test_attach_interface_get_config(self, mock_lock):
"""Tests that the get_config() method is properly called in
attach_interface().
"""
mock_lock.return_value = threading.Semaphore()
self._test_attach_detach_interface_get_config("attach_interface")
def test_detach_interface_get_config(self):
"""Tests that the get_config() method is properly called in
detach_interface().
"""
self._test_attach_detach_interface_get_config("detach_interface")
def test_default_root_device_name(self):
instance = {'uuid': 'fake_instance'}
image_meta = {'id': 'fake'}
root_bdm = {'source_type': 'image',
'detination_type': 'volume',
'image_id': 'fake_id'}
self.flags(virt_type='fake_libvirt_type', group='libvirt')
self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
self.mox.StubOutWithMock(blockinfo, 'get_root_info')
blockinfo.get_disk_bus_for_device_type(instance,
'fake_libvirt_type',
mox.IsA(objects.ImageMeta),
'disk').InAnyOrder().\
AndReturn('virtio')
blockinfo.get_disk_bus_for_device_type(instance,
'fake_libvirt_type',
mox.IsA(objects.ImageMeta),
'cdrom').InAnyOrder().\
AndReturn('ide')
blockinfo.get_root_info(instance, 'fake_libvirt_type',
mox.IsA(objects.ImageMeta), root_bdm,
'virtio', 'ide').AndReturn({'dev': 'vda'})
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(drvr.default_root_device_name(instance, image_meta,
root_bdm), '/dev/vda')
@mock.patch.object(objects.BlockDeviceMapping, "save")
def test_default_device_names_for_instance(self, save_mock):
instance = objects.Instance(**self.test_instance)
instance.root_device_name = '/dev/vda'
ephemerals = [objects.BlockDeviceMapping(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'device_name': 'vdb',
'source_type': 'blank',
'volume_size': 2,
'destination_type': 'local'}))]
swap = [objects.BlockDeviceMapping(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'device_name': 'vdg',
'source_type': 'blank',
'volume_size': 512,
'guest_format': 'swap',
'destination_type': 'local'}))]
block_device_mapping = [
objects.BlockDeviceMapping(
**fake_block_device.AnonFakeDbBlockDeviceDict(
{'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-image-id',
'device_name': '/dev/vdxx',
'disk_bus': 'scsi'}))]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.default_device_names_for_instance(instance,
instance.root_device_name,
ephemerals, swap,
block_device_mapping)
# Ephemeral device name was correct so no changes
self.assertEqual('/dev/vdb', ephemerals[0].device_name)
# Swap device name was incorrect so it was changed
self.assertEqual('/dev/vdc', swap[0].device_name)
# Volume device name was changed too, taking the bus into account
self.assertEqual('/dev/sda', block_device_mapping[0].device_name)
self.assertEqual(3, save_mock.call_count)
def _test_get_device_name_for_instance(self, new_bdm, expected_dev):
instance = objects.Instance(**self.test_instance)
instance.root_device_name = '/dev/vda'
instance.ephemeral_gb = 0
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
got_dev = drvr.get_device_name_for_instance(
instance, [], new_bdm)
self.assertEqual(expected_dev, got_dev)
def test_get_device_name_for_instance_simple(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name=None, guest_format=None,
disk_bus=None, device_type=None)
self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')
def test_get_device_name_for_instance_suggested(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name='/dev/vdg', guest_format=None,
disk_bus=None, device_type=None)
self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')
def test_get_device_name_for_instance_bus(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name=None, guest_format=None,
disk_bus='scsi', device_type=None)
self._test_get_device_name_for_instance(new_bdm, '/dev/sda')
def test_get_device_name_for_instance_device_type(self):
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
boot_index=-1, volume_id='fake-id',
device_name=None, guest_format=None,
disk_bus=None, device_type='floppy')
self._test_get_device_name_for_instance(new_bdm, '/dev/fda')
def test_is_supported_fs_format(self):
supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
for fs in supported_fs:
self.assertTrue(drvr.is_supported_fs_format(fs))
supported_fs = ['', 'dummy']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
for fs in supported_fs:
self.assertFalse(drvr.is_supported_fs_format(fs))
    def test_post_live_migration_at_destination_with_block_device_info(self):
        """post_live_migration_at_destination() regenerates the guest XML
        from the migrated instance's block device info and defines it.
        """
        # Preparing mocks
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        self.resultXML = None

        def fake_getLibVersion():
            return 9011

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            # Capture the generated XML so the test can assert on it later.
            if image_meta is None:
                image_meta = {}
            conf = drvr._get_guest_config(instance, network_info, image_meta,
                                          disk_info, rescue, block_device_info)
            self.resultXML = conf.to_xml()
            return self.resultXML

        def fake_get_domain(instance):
            return mock_domain

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Westmere</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='aes'/>
                      </cpu>
                   """

        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      listDefinedDomains=lambda: [],
                                      numOfDomains=lambda: 0,
                                      baselineCPU=fake_baselineCPU)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)

        self.mox.ReplayAll()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr,
                       '_get_guest_xml',
                       fake_to_xml)
        self.stubs.Set(host.Host,
                       'get_domain',
                       fake_get_domain)
        block_device_info = {'block_device_mapping':
                driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                              {'id': 1, 'guest_format': None,
                               'boot_index': 0,
                               'source_type': 'volume',
                               'destination_type': 'volume',
                               'device_name': '/dev/vda',
                               'disk_bus': 'virtio',
                               'device_type': 'disk',
                               'delete_on_termination': False}),
                    ])}
        block_device_info['block_device_mapping'][0]['connection_info'] = (
            {'driver_volume_type': 'iscsi'})
        with test.nested(
                mock.patch.object(
                    driver_block_device.DriverVolumeBlockDevice, 'save'),
                mock.patch.object(objects.Instance, 'save')
        ) as (mock_volume_save, mock_instance_save):
            drvr.post_live_migration_at_destination(
                    self.context, instance, network_info, True,
                    block_device_info=block_device_info)
            # The regenerated XML must reflect the fake connection info,
            # and the volume BDM must be persisted exactly once.
            self.assertIn('fake', self.resultXML)
            mock_volume_save.assert_called_once_with()
def test_create_propagates_exceptions(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(id=1, uuid='fake-uuid',
image_ref='my_fake_image')
with test.nested(
mock.patch.object(drvr, '_create_domain_setup_lxc'),
mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
mock.patch.object(drvr, '_is_booted_from_volume',
return_value=False),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr, 'firewall_driver'),
mock.patch.object(drvr, '_create_domain',
side_effect=exception.NovaException),
mock.patch.object(drvr, 'cleanup')):
self.assertRaises(exception.NovaException,
drvr._create_domain_and_network,
self.context,
'xml',
instance, None, None)
def test_create_without_pause(self):
self.flags(virt_type='lxc', group='libvirt')
@contextlib.contextmanager
def fake_lxc_disk_handler(*args, **kwargs):
yield
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
with test.nested(
mock.patch.object(drvr, '_lxc_disk_handler',
side_effect=fake_lxc_disk_handler),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr, 'firewall_driver'),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr, 'cleanup')) as (
_handler, cleanup, firewall_driver, create, plug_vifs):
domain = drvr._create_domain_and_network(self.context, 'xml',
instance, None, None)
self.assertEqual(0, create.call_args_list[0][1]['pause'])
self.assertEqual(0, domain.resume.call_count)
    def _test_create_with_network_events(self, neutron_failure=None,
                                         power_on=True):
        """Drive _create_domain_and_network() while simulating neutron
        vif-plugged events that complete, fail, or time out.
        """
        generated_events = []

        def wait_timeout():
            # Shape the simulated event outcome from neutron_failure.
            event = mock.MagicMock()
            if neutron_failure == 'timeout':
                raise eventlet.timeout.Timeout()
            elif neutron_failure == 'error':
                event.status = 'failed'
            else:
                event.status = 'completed'
            return event

        def fake_prepare(instance, event_name):
            # Record every prepared event so we can assert on waits later.
            m = mock.MagicMock()
            m.instance = instance
            m.event_name = event_name
            m.wait.side_effect = wait_timeout
            generated_events.append(m)
            return m

        virtapi = manager.ComputeVirtAPI(mock.MagicMock())
        prepare = virtapi._compute.instance_events.prepare_for_instance_event
        prepare.side_effect = fake_prepare
        drvr = libvirt_driver.LibvirtDriver(virtapi, False)

        instance = objects.Instance(**self.test_instance)
        vifs = [{'id': 'vif1', 'active': False},
                {'id': 'vif2', 'active': False}]

        @mock.patch.object(drvr, 'plug_vifs')
        @mock.patch.object(drvr, 'firewall_driver')
        @mock.patch.object(drvr, '_create_domain')
        @mock.patch.object(drvr, 'cleanup')
        def test_create(cleanup, create, fw_driver, plug_vifs):
            # NOTE: mock.patch decorators inject mocks bottom-up, so
            # 'cleanup' (the innermost decorator) is the first argument.
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, vifs, None,
                                                     power_on=power_on)
            plug_vifs.assert_called_with(instance, vifs)

            pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
            self.assertEqual(pause,
                             create.call_args_list[0][1]['pause'])
            if pause:
                domain.resume.assert_called_once_with()
            if neutron_failure and CONF.vif_plugging_is_fatal:
                cleanup.assert_called_once_with(self.context,
                                                instance, network_info=vifs,
                                                block_device_info=None)

        test_create()

        if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
            prepare.assert_has_calls([
                mock.call(instance, 'network-vif-plugged-vif1'),
                mock.call(instance, 'network-vif-plugged-vif2')])
            for event in generated_events:
                if neutron_failure and generated_events.index(event) != 0:
                    # Once one event failed, later events are never waited on.
                    self.assertEqual(0, event.call_count)
                elif (neutron_failure == 'error' and
                          not CONF.vif_plugging_is_fatal):
                    event.wait.assert_called_once_with()
        else:
            self.assertEqual(0, prepare.call_count)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron(self, is_neutron):
self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_power_off(self,
is_neutron):
# Tests that we don't wait for events if we don't start the instance.
self._test_create_with_network_events(power_on=False)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_nowait(self, is_neutron):
self.flags(vif_plugging_timeout=0)
self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_timeout(
self, is_neutron):
self.flags(vif_plugging_is_fatal=False)
self._test_create_with_network_events(neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_timeout(
self, is_neutron):
self.assertRaises(exception.VirtualInterfaceCreateException,
self._test_create_with_network_events,
neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_error(
self, is_neutron):
self.flags(vif_plugging_is_fatal=False)
self._test_create_with_network_events(neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_error(
self, is_neutron):
self.assertRaises(exception.VirtualInterfaceCreateException,
self._test_create_with_network_events,
neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=False)
def test_create_with_network_events_non_neutron(self, is_neutron):
self._test_create_with_network_events()
    @mock.patch('nova.volume.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
        """Creating a domain with an attached volume wires up the volume
        encryptor and the firewall and returns a Guest wrapping the domain.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_dom = mock.MagicMock()
        mock_encryption_meta = mock.MagicMock()
        get_encryption_metadata.return_value = mock_encryption_meta

        fake_xml = """
            <domain>
                <name>instance-00000001</name>
                <memory>1048576</memory>
                <vcpu>1</vcpu>
                <devices>
                    <disk type='file' device='disk'>
                        <driver name='qemu' type='raw' cache='none'/>
                        <source file='/path/fake-volume1'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                </devices>
            </domain>
        """
        fake_volume_id = "fake-volume-id"
        connection_info = {"driver_volume_type": "fake",
                           "data": {"access_mode": "rw",
                                    "volume_id": fake_volume_id}}

        def fake_getitem(*args, **kwargs):
            # Behave like a BDM dict exposing only the keys under test.
            fake_bdm = {'connection_info': connection_info,
                        'mount_device': '/dev/vda'}
            return fake_bdm.get(args[0])

        mock_volume = mock.MagicMock()
        mock_volume.__getitem__.side_effect = fake_getitem
        block_device_info = {'block_device_mapping': [mock_volume]}
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]

        with test.nested(
            mock.patch.object(drvr, '_get_volume_encryptor'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
        ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
              prepare_instance_filter, create_domain, apply_instance_filter):
            create_domain.return_value = libvirt_guest.Guest(mock_dom)

            guest = drvr._create_domain_and_network(
                    self.context, fake_xml, instance, network_info, None,
                    block_device_info=block_device_info)

            get_encryption_metadata.assert_called_once_with(self.context,
                drvr._volume_api, fake_volume_id, connection_info)
            get_volume_encryptor.assert_called_once_with(connection_info,
                                                         mock_encryption_meta)
            plug_vifs.assert_called_once_with(instance, network_info)
            setup_basic_filtering.assert_called_once_with(instance,
                                                          network_info)
            prepare_instance_filter.assert_called_once_with(instance,
                                                            network_info)
            pause = self._get_pause_flag(drvr, network_info)
            create_domain.assert_called_once_with(
                fake_xml, pause=pause, power_on=True)
            self.assertEqual(mock_dom, guest._domain)
    def test_get_guest_storage_config(self):
        """_get_guest_storage_config() builds the guest's disk devices and
        records default device names on the instance.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["default_swap_device"] = None
        instance = objects.Instance(**test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = instance.get_flavor()
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdi = {'block_device_mapping':
               driver_block_device.convert_volumes([
                   fake_block_device.FakeDbBlockDeviceDict({
                       'id': 1,
                       'source_type': 'volume',
                       'destination_type': 'volume',
                       'device_name': '/dev/vdc'})
               ])}
        bdm = bdi['block_device_mapping'][0]
        bdm['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            bdi)
        mock_conf = mock.MagicMock(source_path='fake')

        with test.nested(
                mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                                  'save'),
                mock.patch.object(drvr, '_connect_volume'),
                mock.patch.object(drvr, '_get_volume_config',
                                  return_value=mock_conf),
                mock.patch.object(drvr, '_set_cache_mode')
        ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
            devices = drvr._get_guest_storage_config(instance, image_meta,
                disk_info, False, bdi, flavor, "hvm")

            # Three devices expected — presumably root, ephemeral and the
            # attached volume; confirm against _get_guest_storage_config.
            self.assertEqual(3, len(devices))
            self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
            self.assertIsNone(instance.default_swap_device)
            connect_volume.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            get_volume_config.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            volume_save.assert_called_once_with()
            self.assertEqual(3, set_cache_mode.call_count)
def test_get_neutron_events(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = [network_model.VIF(id='1'),
network_model.VIF(id='2', active=True)]
events = drvr._get_neutron_events(network_info)
self.assertEqual([('network-vif-plugged', '1')], events)
def test_unplug_vifs_ignores_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
vif_driver.unplug.side_effect = exception.AgentError(
method='unplug')
drvr._unplug_vifs('inst', [1], ignore_errors=True)
vif_driver.unplug.assert_called_once_with('inst', 1)
def test_unplug_vifs_reports_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
vif_driver.unplug.side_effect = exception.AgentError(
method='unplug')
self.assertRaises(exception.AgentError,
drvr.unplug_vifs, 'inst', [1])
vif_driver.unplug.assert_called_once_with('inst', 1)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
drvr.firewall_driver = mock.Mock()
drvr._disconnect_volume = mock.Mock()
fake_inst = {'name': 'foo'}
fake_bdms = [{'connection_info': 'foo',
'mount_device': None}]
with mock.patch('nova.virt.driver'
'.block_device_info_get_mapping',
return_value=fake_bdms):
drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
self.assertTrue(drvr._disconnect_volume.called)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
fake_inst = {'name': 'foo'}
with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
lookup.return_value = fake_inst
# NOTE(danms): Make unplug cause us to bail early, since
# we only care about how it was called
unplug.side_effect = test.TestingException
self.assertRaises(test.TestingException,
drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
unplug.assert_called_once_with(fake_inst, 'netinfo', True)
    @mock.patch.object(driver, 'block_device_info_get_mapping')
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_get_serial_ports_from_guest')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_serial_console_enabled(
            self, undefine, get_ports, get_guest,
            block_device_info_get_mapping):
        """With serial consoles enabled, cleanup() queries the guest's
        serial ports before the domain is undefined.
        """
        self.flags(enabled="True", group='serial_console')
        instance = 'i1'
        network_info = {}
        bdm_info = {}
        firewall_driver = mock.MagicMock()

        guest = mock.Mock(spec=libvirt_guest.Guest)
        get_guest.return_value = guest
        get_ports.return_value = iter([('127.0.0.1', 10000)])
        block_device_info_get_mapping.return_value = ()

        # We want to ensure undefine_domain is called after
        # lookup_domain.
        def undefine_domain(instance):
            # Any later port lookup would blow up, proving the ordering.
            get_ports.side_effect = Exception("domain undefined")
        undefine.side_effect = undefine_domain

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = firewall_driver
        drvr.cleanup(
            'ctx', instance, network_info,
            block_device_info=bdm_info,
            destroy_disks=False, destroy_vifs=False)

        get_ports.assert_called_once_with(guest)
        undefine.assert_called_once_with(instance)
        firewall_driver.unfilter_instance.assert_called_once_with(
            instance, network_info=network_info)
        block_device_info_get_mapping.assert_called_once_with(bdm_info)
    @mock.patch.object(driver, 'block_device_info_get_mapping')
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_serial_console_domain_gone(
            self, undefine, get_guest, block_device_info_get_mapping):
        """cleanup() completes even when the domain has already vanished."""
        self.flags(enabled="True", group='serial_console')
        instance = {'name': 'i1'}
        network_info = {}
        bdm_info = {}
        firewall_driver = mock.MagicMock()

        block_device_info_get_mapping.return_value = ()

        # Ensure get_guest raises same exception that would have occurred
        # if domain was gone.
        get_guest.side_effect = exception.InstanceNotFound("domain undefined")

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = firewall_driver
        drvr.cleanup(
            'ctx', instance, network_info,
            block_device_info=bdm_info,
            destroy_disks=False, destroy_vifs=False)

        get_guest.assert_called_once_with(instance)
        undefine.assert_called_once_with(instance)
        firewall_driver.unfilter_instance.assert_called_once_with(
            instance, network_info=network_info)
        block_device_info_get_mapping.assert_called_once_with(bdm_info)
    def test_swap_volume(self):
        """_swap_volume() rebases the disk onto the new path, resizes it,
        and redefines the persistent domain with its original XML.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)

        with mock.patch.object(drvr._conn, 'defineXML',
                               create=True) as mock_define:
            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            # Empty blockJobInfo — presumably read by _swap_volume as
            # "job finished"; confirm against the driver implementation.
            mock_dom.blockJobInfo.return_value = {}

            drvr._swap_volume(guest, srcfile, dstfile, 1)

            mock_dom.XMLDesc.assert_called_once_with(
                flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                       fakelibvirt.VIR_DOMAIN_XML_SECURE))
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dstfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
            mock_dom.blockResize.assert_called_once_with(
                srcfile, 1 * units.Gi / units.Ki)
            mock_define.assert_called_once_with(xmldoc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
'get_by_volume_id')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_swap_volume_driver_bdm_save(self, get_guest,
connect_volume, get_volume_config,
get_by_volume_id, volume_save,
swap_volume, disconnect_volume):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
old_connection_info = {'driver_volume_type': 'fake',
'serial': 'old-volume-id',
'data': {'device_path': '/fake-old-volume',
'access_mode': 'rw'}}
new_connection_info = {'driver_volume_type': 'fake',
'serial': 'new-volume-id',
'data': {'device_path': '/fake-new-volume',
'access_mode': 'rw'}}
mock_dom = mock.MagicMock()
guest = libvirt_guest.Guest(mock_dom)
mock_dom.XMLDesc.return_value = """<domain>
<devices>
<disk type='file'>
<source file='/fake-old-volume'/>
<target dev='vdb' bus='virtio'/>
</disk>
</devices>
</domain>
"""
mock_dom.name.return_value = 'inst'
mock_dom.UUIDString.return_value = 'uuid'
get_guest.return_value = guest
disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'}
get_volume_config.return_value = mock.MagicMock(
source_path='/fake-new-volume')
bdm = objects.BlockDeviceMapping(self.context,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/vdb',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-2',
'boot_index': 0}))
get_by_volume_id.return_value = bdm
conn.swap_volume(old_connection_info, new_connection_info, instance,
'/dev/vdb', 1)
get_guest.assert_called_once_with(instance)
connect_volume.assert_called_once_with(new_connection_info, disk_info)
swap_volume.assert_called_once_with(guest, 'vdb',
'/fake-new-volume', 1)
disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
volume_save.assert_called_once_with()
    def test_live_snapshot(self):
        """_live_snapshot() rebases onto a shallow delta file, extracts the
        snapshot from it, and redefines the original domain XML.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        mock_dom = mock.MagicMock()

        with test.nested(
                mock.patch.object(drvr._conn, 'defineXML', create=True),
                mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
                mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
                mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
                mock.patch.object(fake_libvirt_utils, 'chown'),
                mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
        ) as (mock_define, mock_size, mock_backing, mock_create_cow,
              mock_chown, mock_snapshot):
            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"
            bckfile = "/other/path"
            # The delta file name is derived from the destination path.
            dltfile = dstfile + ".delta"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            mock_size.return_value = 1004009
            mock_backing.return_value = bckfile
            guest = libvirt_guest.Guest(mock_dom)

            image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
            drvr._live_snapshot(self.context, self.test_instance, guest,
                                srcfile, dstfile, "qcow2", image_meta)

            mock_dom.XMLDesc.assert_called_once_with(flags=(
                fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                fakelibvirt.VIR_DOMAIN_XML_SECURE))
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dltfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))

            mock_size.assert_called_once_with(srcfile)
            mock_backing.assert_called_once_with(srcfile, basename=False)
            mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
            mock_chown.assert_called_once_with(dltfile, os.getuid())
            mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                                  dstfile, "qcow2")
            mock_define.assert_called_once_with(xmldoc)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
def test_live_migration_hostname_valid(self, mock_lm):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.live_migration(self.context, self.test_instance,
"host1.example.com",
lambda x: x,
lambda x: x)
self.assertEqual(1, mock_lm.call_count)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
    @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
    def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm):
        """An invalid destination hostname raises InvalidHostname before
        _live_migration is ever reached.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Force the hostname validator to reject the destination.
        mock_hostname.return_value = False
        self.assertRaises(exception.InvalidHostname,
                          drvr.live_migration,
                          self.context, self.test_instance,
                          "foo/?com=/bin/sh",
                          lambda x: x,
                          lambda x: x)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('tempfile.mkstemp')
    @mock.patch('os.close', return_value=None)
    def test_check_instance_shared_storage_local_raw(self,
                                                     mock_close,
                                                     mock_mkstemp,
                                                     mock_exists):
        """With raw file-backed storage, the shared-storage check creates a
        temp file under the instance path and returns its name.
        """
        instance_uuid = str(uuid.uuid4())
        self.flags(images_type='raw', group='libvirt')
        self.flags(instances_path='/tmp')
        # mkstemp is mocked so no real file descriptor (-1) or file is made.
        mock_mkstemp.return_value = (-1,
                                     '/tmp/{0}/file'.format(instance_uuid))
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        temp_file = driver.check_instance_shared_storage_local(self.context,
                                                               instance)
        self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                         temp_file['filename'])
def test_check_instance_shared_storage_local_rbd(self):
self.flags(images_type='rbd', group='libvirt')
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
self.assertIsNone(driver.
check_instance_shared_storage_local(self.context,
instance))
def test_version_to_string(self):
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
string_ver = driver._version_to_string((4, 33, 173))
self.assertEqual("4.33.173", string_ver)
    def test_parallels_min_version_fail(self):
        """init_host rejects a parallels libvirt version of 1002011 (one
        below the 1002012 value accepted by the _ok test).
        """
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002011):
            self.assertRaises(exception.NovaException,
                              driver.init_host, 'wibble')
    def test_parallels_min_version_ok(self):
        """init_host succeeds at libvirt version 1002012 for parallels."""
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002012):
            driver.init_host('wibble')
    def test_get_guest_config_parallels_vm(self):
        """A parallels HVM guest with ploop images gets the expected six
        devices: two ploop disks, interface, input, graphics and video.
        """
        self.flags(virt_type='parallels', group='libvirt')
        self.flags(images_type='ploop', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.HVM, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(6, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[0].driver_format, "ploop")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
    def test_get_guest_config_parallels_ct(self):
        """A parallels container (EXE mode) guest is configured with a ploop
        root filesystem instead of a disk device, plus interface, graphics
        and video.
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vm_mode.EXE
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, {'mapping': {'disk': {}}})
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(4, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        fs = cfg.devices[0]
        self.assertEqual(fs.source_type, "file")
        self.assertEqual(fs.driver_type, "ploop")
        self.assertEqual(fs.target_dir, "/")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestVideo)
    def _test_get_guest_config_parallels_volume(self, vmmode, devices):
        """Common check that a parallels guest with an attached volume gets
        a real disk device (target sda) and no filesystem device.

        :param vmmode: vm_mode to configure on the instance (EXE or HVM)
        :param devices: expected total number of guest devices
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vmmode
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 0,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sda'}),
                ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref,
                                         _fake_network_info(self.stubs, 1),
                                         image_meta, disk_info, None, info)
            mock_save.assert_called_once_with()
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vmmode, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(devices, len(cfg.devices))
        disk_found = False
        for dev in cfg.devices:
            # No filesystem devices may appear when booting from a volume.
            result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys)
            self.assertFalse(result)
            # The volume disk is the one that is not the ephemeral
            # 'disk.local' (its source_path may also be None).
            if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and
                (dev.source_path is None or
                 'disk.local' not in dev.source_path)):
                self.assertEqual("disk", dev.source_device)
                self.assertEqual("sda", dev.target_dev)
                disk_found = True
        self.assertTrue(disk_found)
def test_get_guest_config_parallels_volume(self):
self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4)
self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6)
class HostStateTestCase(test.NoDBTestCase):
    """Tests get_available_resource() against a LibvirtDriver subclass whose
    resource probes are all faked with the class-level fixtures below.
    """
    # Fake CPU description returned by the driver's _get_cpu_info().
    cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge",
                "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}
    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]
    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        "phys_function": None}]
    numa_topology = objects.NUMATopology(
                        cells=[objects.NUMACell(
                            id=1, cpuset=set([1, 2]), memory=1024,
                            cpu_usage=0, memory_usage=0,
                            mempages=[], siblings=[],
                            pinned_cpus=set([])),
                               objects.NUMACell(
                            id=2, cpuset=set([3, 4]), memory=1024,
                            cpu_usage=0, memory_usage=0,
                            mempages=[], siblings=[],
                            pinned_cpus=set([]))])
    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object."""
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)
            self._host = host.Host("qemu:///system")
            # Memory getters are replaced on the Host object itself rather
            # than overridden on the driver.
            def _get_memory_mb_total():
                return 497
            def _get_memory_mb_used():
                return 88
            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used
        def _get_vcpu_total(self):
            return 1
        def _get_vcpu_used(self):
            return 0
        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info
        def _get_disk_over_committed_size_total(self):
            return 0
        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}
        def get_host_uptime(self):
            return ('10:01:16 up 1:36, 6 users, '
                    'load average: 0.21, 0.16, 0.19')
        def _get_disk_available_least(self):
            return 13091
        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps
        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)
        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology
    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        """Stats reported by get_available_resource match the fixtures."""
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")
        drvr = HostStateTestCase.FakeConnection()
        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 1001000)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel", "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                 })
        # NOTE(review): 80 is the faked free-disk value, not the 13091 from
        # _get_disk_available_least -- presumably the driver computes this
        # field from local_gb_info; confirm against the driver code.
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))
class LibvirtDriverTestCase(test.NoDBTestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        """Create a read-only LibvirtDriver, admin context and raw image
        metadata shared by the tests in this case.
        """
        super(LibvirtDriverTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
        self.test_image_meta = {
            "disk_format": "raw",
        }
def _create_instance(self, params=None):
"""Create a test instance."""
if not params:
params = {}
flavor = objects.Flavor(memory_mb=512,
swap=0,
vcpu_weight=None,
root_gb=10,
id=2,
name=u'm1.tiny',
ephemeral_gb=20,
rxtx_factor=1.0,
flavorid=u'1',
vcpus=1)
inst = {}
inst['id'] = 1
inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
inst['os_type'] = 'linux'
inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = 2
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = flavor.root_gb
inst['ephemeral_gb'] = flavor.ephemeral_gb
inst['config_drive'] = True
inst['kernel_id'] = 2
inst['ramdisk_id'] = 3
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = {}
inst['metadata'] = {}
inst.update(params)
return objects.Instance(flavor=flavor,
old_flavor=None, new_flavor=None,
**inst)
@staticmethod
def _disk_info():
# 10G root and 512M swap disk
disk_info = [{'disk_size': 1, 'type': 'qcow2',
'virt_disk_size': 10737418240, 'path': '/test/disk',
'backing_file': '/base/disk'},
{'disk_size': 1, 'type': 'qcow2',
'virt_disk_size': 536870912, 'path': '/test/disk.swap',
'backing_file': '/base/swap_512'}]
return jsonutils.dumps(disk_info)
    def test_migrate_disk_and_power_off_exception(self):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off: a failure from the first execute()
        call propagates out of the migration.
        """
        self.counter = 0
        self.checked_shared_storage = False
        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return '[]'
        def fake_destroy(instance):
            pass
        def fake_get_host_ip_addr():
            return '10.0.0.1'
        def fake_execute(*args, **kwargs):
            self.counter += 1
            if self.counter == 1:
                assert False, "intentional failure"
        def fake_os_path_exists(path):
            return True
        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False
        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)
        self.assertRaises(AssertionError,
                          self.drvr.migrate_disk_and_power_off,
                          context.get_admin_context(), ins_ref, '10.0.0.2',
                          flavor_obj, None)
    def _test_migrate_disk_and_power_off(self, flavor_obj,
                                         block_device_info=None,
                                         params_for_instance=None):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off: successful migration to both a remote
        and the local host returns the instance disk info.
        """
        disk_info = self._disk_info()
        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return disk_info
        def fake_destroy(instance):
            pass
        def fake_get_host_ip_addr():
            return '10.0.0.1'
        def fake_execute(*args, **kwargs):
            pass
        def fake_copy_image(src, dest, host=None, receive=False,
                            on_execute=None, on_completion=None,
                            compression=True):
            # Both process callbacks must always be supplied by the driver.
            self.assertIsNotNone(on_execute)
            self.assertIsNotNone(on_completion)
        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image)
        ins_ref = self._create_instance(params=params_for_instance)
        # dest is different host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.2',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)
        # dest is same host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.1',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)
def test_migrate_disk_and_power_off(self):
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self._test_migrate_disk_and_power_off(flavor_obj)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        """Boot-from-volume migration disconnects the boot volume (vda)."""
        info = {'block_device_mapping': [{'boot_index': None,
                                          'mount_device': '/dev/vdd',
                                          'connection_info': None},
                                         {'boot_index': 0,
                                          'mount_device': '/dev/vda',
                                          'connection_info': None}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
        self._test_migrate_disk_and_power_off(
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None, 'ephemeral_gb': 0})
        disconnect_volume.assert_called_with(
            info['block_device_mapping'][1]['connection_info'], 'vda')
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off: the swap disk must be skipped (neither
        copied nor moved) when resizing on the same host.
        """
        self.copy_or_move_swap_called = False
        disk_info = self._disk_info()
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'
        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True
        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True
        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Original instance config
        instance = self._create_instance({'root_gb': 10,
                                          'ephemeral_gb': 0})
        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)
        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)
        mock_get_disk_info.assert_called_once_with(instance,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info, out)
    def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off: helper asserting that the migration
        raises *expected_exc* during the resize pre-checks.
        """
        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return self._disk_info()
        def fake_destroy(instance):
            pass
        def fake_get_host_ip_addr():
            return '10.0.0.1'
        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)
        # Migration is not implemented for LVM backed instances
        self.assertRaises(expected_exc,
                          self.drvr.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    def _test_migrate_disk_and_power_off_backing_file(self,
                                                      shared_storage,
                                                      mock_is_shared_storage,
                                                      mock_get_disk_info,
                                                      mock_destroy,
                                                      mock_execute):
        """A qcow2 disk with a backing file must never be flattened with
        'qemu-img convert' during migration, shared storage or not.

        :param shared_storage: value _is_storage_shared_with should report
        """
        self.convert_file_called = False
        flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
        flavor_obj = objects.Flavor(**flavor)
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)
        mock_get_disk_info.return_value = disk_info_text
        mock_is_shared_storage.return_value = shared_storage
        def fake_execute(*args, **kwargs):
            self.assertNotEqual(args[0:2], ['qemu-img', 'convert'])
        mock_execute.side_effect = fake_execute
        instance = self._create_instance()
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), instance, '10.0.0.2',
               flavor_obj, None)
        self.assertTrue(mock_is_shared_storage.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertEqual(out, disk_info_text)
    def test_migrate_disk_and_power_off_shared_storage(self):
        # Source and destination share instance storage. NOTE: the flag must
        # stay positional -- patch decorators on the helper append mocks.
        self._test_migrate_disk_and_power_off_backing_file(True)
    def test_migrate_disk_and_power_off_non_shared_storage(self):
        # Source and destination do not share instance storage.
        self._test_migrate_disk_and_power_off_backing_file(False)
    def test_migrate_disk_and_power_off_lvm(self):
        """LVM-backed instances cannot be migrated; expect a fault
        rollback.
        """
        self.flags(images_type='lvm', group='libvirt')
        def fake_execute(*args, **kwargs):
            pass
        self.stubs.Set(utils, 'execute', fake_execute)
        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
    def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
        """A failing remote command (no ssh) rolls the resize back."""
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()
        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False
        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)
        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info):
        """Shrinking both root (10->5) and ephemeral (20->10) disks is
        rejected with a fault rollback.
        """
        instance = self._create_instance()
        flavor = {'root_gb': 5, 'ephemeral_gb': 10}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()
        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
            self, mock_get_disk_info):
        """Shrinking the default ephemeral disk (20 -> 0) is rejected."""
        # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()
        self.assertRaises(exception.InstanceFaultRollback,
                          self.drvr.migrate_disk_and_power_off,
                          'ctx', instance, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        """Ephemeral block-device mappings: resizing below the mapped
        ephemeral size fails, resizing above it succeeds.
        """
        # Mix of blank/volume/snapshot mappings; only the blank ones count
        # as ephemeral disks for the size check.
        mappings = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            },
            {
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': 1,
                'guest_format': None,
                'boot_index': 1,
                'volume_size': 6
            },
            {
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': 1,
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 0,
                'volume_size': 4
            },
            {
                'device_name': '/dev/sda3',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 3
            }
        ]
        mock_get.return_value = mappings
        instance = self._create_instance()
        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()
        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)
        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(flavor_obj)
    def test_wait_for_running(self):
        """_wait_for_running: missing instance raises InstanceNotFound, a
        running one stops the looping call, anything else keeps polling.
        """
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.InstanceNotFound(instance_id=instance['uuid'])
            elif instance['name'] == "running":
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)
        # instance not found case
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr._wait_for_running,
                          {'name': 'not_found',
                           'uuid': 'not_found_uuid'})
        # instance is running case
        self.assertRaises(loopingcall.LoopingCallDone,
                          self.drvr._wait_for_running,
                          {'name': 'running',
                           'uuid': 'running_uuid'})
        # else case
        self.drvr._wait_for_running({'name': 'else',
                                     'uuid': 'other_uuid'})
    def test_disk_size_from_instance_disk_info(self):
        """_disk_size_from_instance maps disk paths to flavor sizes: 'disk'
        -> root_gb, 'disk.local' -> ephemeral_gb, 'disk.swap' -> 0.
        """
        instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
        inst = objects.Instance(**instance_data)
        info = {'path': '/path/disk'}
        self.assertEqual(10 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, info))
        info = {'path': '/path/disk.local'}
        self.assertEqual(20 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, info))
        info = {'path': '/path/disk.swap'}
        self.assertEqual(0,
                         self.drvr._disk_size_from_instance(inst, info))
@mock.patch('nova.utils.execute')
def test_disk_raw_to_qcow2(self, mock_execute):
path = '/test/disk'
_path_qcow = path + '_qcow'
self.drvr._disk_raw_to_qcow2(path)
mock_execute.assert_has_calls([
mock.call('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, _path_qcow),
mock.call('mv', _path_qcow, path)])
    @mock.patch('nova.utils.execute')
    def test_disk_qcow2_to_raw(self, mock_execute):
        """qcow2->raw converts into a temp path then moves it into place."""
        path = '/test/disk'
        _path_raw = path + '_raw'
        self.drvr._disk_qcow2_to_raw(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'qcow2',
                      '-O', 'raw', path, _path_raw),
            mock.call('mv', _path_raw, path)])
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_raw(self, mock_extend):
        """Raw images are extended in place, with no format conversion."""
        image = imgmodel.LocalFileImage("/test/disk",
                                        imgmodel.FORMAT_RAW)
        self.drvr._disk_resize(image, 50)
        mock_extend.assert_called_once_with(image, 50)
    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_extendable')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_image_extendable):
        """qcow2 images are converted to raw, extended, then converted
        back to qcow2.
        """
        with test.nested(
                mock.patch.object(
                    self.drvr, '_disk_qcow2_to_raw'),
                mock.patch.object(
                    self.drvr, '_disk_raw_to_qcow2'))\
        as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):
            mock_can_resize.return_value = True
            mock_is_image_extendable.return_value = True
            imageqcow2 = imgmodel.LocalFileImage("/test/disk",
                                                 imgmodel.FORMAT_QCOW2)
            imageraw = imgmodel.LocalFileImage("/test/disk",
                                               imgmodel.FORMAT_RAW)
            self.drvr._disk_resize(imageqcow2, 50)
            mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path)
            # extend must be called on the raw view of the same file.
            mock_extend.assert_called_once_with(imageraw, 50)
            mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path)
    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .finish_migration: the domain is created with the requested power
        state and the disk is resized only when resize_instance is set.
        """
        powered_on = power_on
        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False
        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""
        def fake_plug_vifs(instance, network_info):
            pass
        def fake_create_image(context, inst,
                              disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True,
                              fallback_from_host=None):
            # Files are never injected during a migration.
            self.assertFalse(inject_files)
        def fake_create_domain_and_network(
                context, xml, instance, network_info, disk_info,
                block_device_info=None, power_on=True, reboot=False,
                vifs_already_plugged=False):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)
        def fake_enable_hairpin():
            pass
        def fake_execute(*args, **kwargs):
            pass
        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)
        def fake_disk_resize(image, size):
            self.fake_disk_resize_called = True
        self.flags(use_cow_images=True)
        self.stubs.Set(self.drvr, '_disk_resize',
                       fake_disk_resize)
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.drvr, '_create_image',
                       fake_create_image)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)
        ins_ref = self._create_instance()
        migration = objects.Migration()
        migration.source_compute = 'fake-source-compute'
        migration.dest_compute = 'fake-dest-compute'
        migration.source_node = 'fake-source-node'
        migration.dest_node = 'fake-dest-node'
        self.drvr.finish_migration(
                      context.get_admin_context(), migration, ins_ref,
                      self._disk_info(), [], self.test_image_meta,
                      resize_instance, None, power_on)
        self.assertTrue(self.fake_create_domain_called)
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)
def test_finish_migration_resize(self):
self._test_finish_migration(True, resize_instance=True)
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
def test_finish_migration_power_off(self):
self._test_finish_migration(False)
    def _test_finish_revert_migration(self, power_on):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .finish_revert_migration: the domain is recreated with the
        requested power state and with VIFs already plugged.
        """
        powered_on = power_on
        self.fake_create_domain_called = False
        def fake_execute(*args, **kwargs):
            pass
        def fake_plug_vifs(instance, network_info):
            pass
        def fake_create_domain(context, xml, instance, network_info,
                               disk_info, block_device_info=None,
                               power_on=None,
                               vifs_already_plugged=None):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)
            return mock.MagicMock()
        def fake_enable_hairpin():
            pass
        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)
        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None):
            return ""
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)
        self.stubs.Set(utils, 'get_image_from_system_metadata',
                       lambda *a: self.test_image_meta)
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            ins_ref = self._create_instance()
            # The driver reads back the saved libvirt.xml; provide an
            # (empty) one in the instance directory.
            os.mkdir(os.path.join(tmpdir, ins_ref['name']))
            libvirt_xml_path = os.path.join(tmpdir,
                                            ins_ref['name'],
                                            'libvirt.xml')
            f = open(libvirt_xml_path, 'w')
            f.close()
            self.drvr.finish_revert_migration(
                                       context.get_admin_context(), ins_ref,
                                       [], None, power_on)
            self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
    def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                                  del_inst_failed=False):
        """Reverting after a crash: if the '<path>_resize' backup exists,
        the instance dir is removed (ENOENT from rmtree is tolerated) and
        the backup is moved back into place.

        mox record/replay is used here, so the expectation order below is
        part of the test.
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self
            def wait(self):
                return None
        context = 'fake_context'
        instance = self._create_instance()
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(shutil, 'rmtree')
        self.mox.StubOutWithMock(utils, 'execute')
        self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
        self.stubs.Set(self.drvr, '_get_guest_xml',
                       lambda *a, **k: None)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       lambda *a, **kw: None)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo')
        os.path.exists('/fake/foo_resize').AndReturn(backup_made)
        if backup_made:
            if del_inst_failed:
                os_error = OSError(errno.ENOENT, 'No such file or directory')
                shutil.rmtree('/fake/foo').AndRaise(os_error)
            else:
                shutil.rmtree('/fake/foo')
            utils.execute('mv', '/fake/foo_resize', '/fake/foo')
        self.mox.ReplayAll()
        self.drvr.finish_revert_migration(context, instance, [])
    def test_finish_revert_migration_after_crash(self):
        # Crash after the backup dir was made: it must be restored.
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_new(self):
        # NOTE(review): identical arguments to the test above -- presumably
        # this was meant to exercise a distinct crash point; confirm intent.
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_backup(self):
        # Crash before the backup dir was made: nothing to restore.
        self._test_finish_revert_migration_after_crash(backup_made=False)
    def test_finish_revert_migration_after_crash_delete_failed(self):
        # ENOENT while deleting the instance dir must not abort the revert.
        self._test_finish_revert_migration_after_crash(backup_made=True,
                                                       del_inst_failed=True)
    def test_finish_revert_migration_preserves_disk_bus(self):
        """The hw_disk_bus image property (ide) survives a revert and is
        reflected in the disk_info passed to _get_guest_xml.
        """
        def fake_get_guest_xml(context, instance, network_info, disk_info,
                               image_meta, block_device_info=None):
            self.assertEqual('ide', disk_info['disk_bus'])
        image_meta = {"disk_format": "raw",
                      "properties": {"hw_disk_bus": "ide"}}
        instance = self._create_instance()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(utils, 'get_image_from_system_metadata',
                                  return_value=image_meta),
                mock.patch.object(drvr, '_get_guest_xml',
                                  side_effect=fake_get_guest_xml)):
            drvr.finish_revert_migration('', instance, None, power_on=False)
def test_cleanup_failed_migration(self):
    """_cleanup_failed_migration removes the given instance directory."""
    self.mox.StubOutWithMock(shutil, 'rmtree')
    shutil.rmtree('/fake/inst')
    self.mox.ReplayAll()
    self.drvr._cleanup_failed_migration('/fake/inst')
def test_confirm_migration(self):
    """confirm_migration delegates to _cleanup_resize."""
    ins_ref = self._create_instance()
    self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
    self.drvr._cleanup_resize(ins_ref,
                              _fake_network_info(self.stubs, 1))
    self.mox.ReplayAll()
    self.drvr.confirm_migration("migration_ref", ins_ref,
                                _fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
    """On the owning host, _cleanup_resize only removes the backup dir."""
    CONF.set_override('policy_dirs', [])
    ins_ref = self._create_instance({'host': CONF.host})

    def fake_os_path_exists(path):
        return True
    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(utils, 'execute')
    libvirt_utils.get_instance_path(ins_ref,
                                    forceold=True).AndReturn('/fake/inst')
    utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                  attempts=5)
    self.mox.ReplayAll()
    self.drvr._cleanup_resize(ins_ref,
                              _fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
    """When the instance moved hosts, cleanup also undefines the domain,
    unplugs VIFs and unfilters the instance (all stubbed here)."""
    CONF.set_override('policy_dirs', [])
    host = 'not' + CONF.host
    ins_ref = self._create_instance({'host': host})

    def fake_os_path_exists(path):
        return True

    def fake_undefine_domain(instance):
        pass

    def fake_unplug_vifs(instance, network_info, ignore_errors=False):
        pass

    def fake_unfilter_instance(instance, network_info):
        pass
    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    self.stubs.Set(self.drvr, '_undefine_domain',
                   fake_undefine_domain)
    self.stubs.Set(self.drvr, 'unplug_vifs',
                   fake_unplug_vifs)
    self.stubs.Set(self.drvr.firewall_driver,
                   'unfilter_instance', fake_unfilter_instance)
    self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(utils, 'execute')
    libvirt_utils.get_instance_path(ins_ref,
                                    forceold=True).AndReturn('/fake/inst')
    utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                  attempts=5)
    self.mox.ReplayAll()
    self.drvr._cleanup_resize(ins_ref,
                              _fake_network_info(self.stubs, 1))
def test_get_instance_disk_info_exception(self):
    """A libvirtError from XMLDesc surfaces as InstanceNotFound."""
    instance = self._create_instance()

    class FakeExceptionDomain(FakeVirtDomain):
        def __init__(self):
            super(FakeExceptionDomain, self).__init__()

        def XMLDesc(self, flags):
            raise fakelibvirt.libvirtError("Libvirt error")

    def fake_get_domain(self, instance):
        return FakeExceptionDomain()
    self.stubs.Set(host.Host, 'get_domain',
                   fake_get_domain)
    self.assertRaises(exception.InstanceNotFound,
                      self.drvr.get_instance_disk_info,
                      instance)
@mock.patch('os.path.exists')
@mock.patch.object(lvm, 'list_volumes')
def test_lvm_disks(self, listlvs, exists):
    """_lvm_disks returns only volumes prefixed with the instance uuid."""
    instance = objects.Instance(uuid='fake-uuid', id=1)
    self.flags(images_volume_group='vols', group='libvirt')
    exists.return_value = True
    listlvs.return_value = ['fake-uuid_foo',
                            'other-uuid_foo']
    disks = self.drvr._lvm_disks(instance)
    # 'other-uuid_foo' belongs to a different instance and is excluded.
    self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
def test_is_booted_from_volume(self):
    """Instances without an image_ref are treated as boot-from-volume."""
    func = libvirt_driver.LibvirtDriver._is_booted_from_volume
    instance, disk_mapping = {}, {}
    self.assertTrue(func(instance, disk_mapping))
    disk_mapping['disk'] = 'map'
    self.assertTrue(func(instance, disk_mapping))
    # Once an image_ref is present, it is not boot-from-volume.
    instance['image_ref'] = 'uuid'
    self.assertFalse(func(instance, disk_mapping))
@mock.patch('nova.virt.netutils.get_injected_network_template')
@mock.patch('nova.virt.disk.api.inject_data')
@mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
def _test_inject_data(self, driver_params, path, disk_params,
                      mock_conn, disk_inject_data, inj_network,
                      called=True):
    """Drive _inject_data and verify the call into disk.api.inject_data.

    :param driver_params: kwargs passed through to _inject_data
    :param path: backing image path; '/fail/path' simulates an image
        that does not exist, so no injection should happen
    :param disk_params: positional args expected on inject_data
    :param called: whether inject_data is expected to be invoked at all
    """
    class ImageBackend(object):
        path = '/path'

        def check_image_exists(self):
            if self.path == '/fail/path':
                return False
            return True

        def get_model(self, connection):
            return imgmodel.LocalFileImage(self.path,
                                           imgmodel.FORMAT_RAW)

    def fake_inj_network(*args, **kwds):
        return args[0] or None
    inj_network.side_effect = fake_inj_network
    image_backend = ImageBackend()
    image_backend.path = path
    with mock.patch.object(
            self.drvr.image_backend,
            'image',
            return_value=image_backend):
        self.flags(inject_partition=0, group='libvirt')
        self.drvr._inject_data(**driver_params)
        if called:
            disk_inject_data.assert_called_once_with(
                mock.ANY,
                *disk_params,
                partition=None, mandatory=('files',))
        self.assertEqual(disk_inject_data.called, called)
def _test_inject_data_default_driver_params(self, **params):
    """Build the default kwargs dict accepted by _test_inject_data."""
    driver_params = dict.fromkeys(('network_info', 'admin_pass', 'files'))
    driver_params['instance'] = self._create_instance(params=params)
    driver_params['suffix'] = ''
    return driver_params
def test_inject_data_adminpass(self):
    """Admin password is injected only while inject_password is enabled."""
    self.flags(inject_password=True, group='libvirt')
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['admin_pass'] = 'foobar'
    disk_params = [
        None,  # key
        None,  # net
        {},  # metadata
        'foobar',  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)
    # Test with the configuration set to false.
    self.flags(inject_password=False, group='libvirt')
    self._test_inject_data(driver_params, "/path",
                           disk_params, called=False)

def test_inject_data_key(self):
    """SSH key is injected only while inject_key is enabled."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['instance']['key_data'] = 'key-content'
    self.flags(inject_key=True, group='libvirt')
    disk_params = [
        'key-content',  # key
        None,  # net
        {},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)
    # Test with the configuration set to false.
    self.flags(inject_key=False, group='libvirt')
    self._test_inject_data(driver_params, "/path",
                           disk_params, called=False)

def test_inject_data_metadata(self):
    """Instance metadata is always passed through to inject_data."""
    instance_metadata = {'metadata': {'data': 'foo'}}
    driver_params = self._test_inject_data_default_driver_params(
        **instance_metadata
    )
    disk_params = [
        None,  # key
        None,  # net
        {'data': 'foo'},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)

def test_inject_data_files(self):
    """Personality files are passed through to inject_data."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['files'] = ['file1', 'file2']
    disk_params = [
        None,  # key
        None,  # net
        {},  # metadata
        None,  # admin_pass
        ['file1', 'file2'],  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)

def test_inject_data_net(self):
    """Network info is passed through to inject_data."""
    driver_params = self._test_inject_data_default_driver_params()
    driver_params['network_info'] = {'net': 'eno1'}
    disk_params = [
        None,  # key
        {'net': 'eno1'},  # net
        {},  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/path", disk_params)

def test_inject_not_exist_image(self):
    """No injection happens when the backing image does not exist."""
    driver_params = self._test_inject_data_default_driver_params()
    disk_params = [
        'key-content',  # key
        None,  # net
        None,  # metadata
        None,  # admin_pass
        None,  # files
    ]
    self._test_inject_data(driver_params, "/fail/path",
                           disk_params, called=False)
def _test_attach_detach_interface(self, method, power_state,
                                  expected_flags):
    """Common driver for attach_interface/detach_interface tests.

    :param method: 'attach_interface' or 'detach_interface'
    :param power_state: power state reported by the fake domain
    :param expected_flags: libvirt device flags expected on the call
    """
    instance = self._create_instance()
    network_info = _fake_network_info(self.stubs, 1)
    domain = FakeVirtDomain()
    self.mox.StubOutWithMock(host.Host, 'get_domain')
    self.mox.StubOutWithMock(self.drvr.firewall_driver,
                             'setup_basic_filtering')
    self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
    self.mox.StubOutWithMock(domain, 'info')
    host.Host.get_domain(instance).AndReturn(domain)
    if method == 'attach_interface':
        self.drvr.firewall_driver.setup_basic_filtering(
            instance, [network_info[0]])
    fake_image_meta = {'id': instance.image_ref}
    fake_image_meta_obj = objects.ImageMeta.from_dict(
        fake_image_meta)
    # Capture the real vif config before get_config is stubbed out,
    # so the recorded expectation can return it.
    expected = self.drvr.vif_driver.get_config(
        instance, network_info[0], fake_image_meta_obj, instance.flavor,
        CONF.libvirt.virt_type, self.drvr._host)
    self.mox.StubOutWithMock(self.drvr.vif_driver,
                             'get_config')
    self.drvr.vif_driver.get_config(
        instance, network_info[0],
        mox.IsA(objects.ImageMeta),
        mox.IsA(objects.Flavor),
        CONF.libvirt.virt_type,
        self.drvr._host).AndReturn(expected)
    domain.info().AndReturn([power_state, 1, 2, 3, 4])
    if method == 'attach_interface':
        domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
    elif method == 'detach_interface':
        # NOTE(review): detachDeviceFlags is not mox-stubbed above, so
        # this calls FakeVirtDomain directly -- presumably a no-op there;
        # confirm.
        domain.detachDeviceFlags(expected.to_xml(), expected_flags)
    self.mox.ReplayAll()
    if method == 'attach_interface':
        self.drvr.attach_interface(
            instance, fake_image_meta, network_info[0])
    elif method == 'detach_interface':
        self.drvr.detach_interface(
            instance, network_info[0])
    self.mox.VerifyAll()
def test_attach_interface_with_running_instance(self):
    self._test_attach_detach_interface(
        'attach_interface', power_state.RUNNING,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_attach_interface_with_pause_instance(self):
    # A paused domain still gets AFFECT_LIVE in addition to
    # AFFECT_CONFIG.
    self._test_attach_detach_interface(
        'attach_interface', power_state.PAUSED,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_attach_interface_with_shutdown_instance(self):
    # A shut-down domain only needs the persistent config updated.
    self._test_attach_detach_interface(
        'attach_interface', power_state.SHUTDOWN,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))

def test_detach_interface_with_running_instance(self):
    self._test_attach_detach_interface(
        'detach_interface', power_state.RUNNING,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_detach_interface_with_pause_instance(self):
    self._test_attach_detach_interface(
        'detach_interface', power_state.PAUSED,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                        fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

def test_detach_interface_with_shutdown_instance(self):
    self._test_attach_detach_interface(
        'detach_interface', power_state.SHUTDOWN,
        expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
def test_rescue(self):
    """rescue saves the XML, builds rescue images and reboots into them."""
    instance = self._create_instance({'config_drive': None})
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    network_info = _fake_network_info(self.stubs, 1)
    self.mox.StubOutWithMock(self.drvr,
                             '_get_existing_domain_xml')
    self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
    self.mox.StubOutWithMock(imagebackend.Backend, 'image')
    self.mox.StubOutWithMock(imagebackend.Image, 'cache')
    self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
    self.mox.StubOutWithMock(self.drvr, '_destroy')
    self.mox.StubOutWithMock(self.drvr, '_create_domain')
    self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
    # The pre-rescue XML is written out twice (unrescue.xml + copy).
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())
    # Rescue kernel/ramdisk/disk images are created from the backend.
    imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             user_id=mox.IgnoreArg()).MultipleTimes()
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             size=None, user_id=mox.IgnoreArg())
    image_meta = {'id': 'fake', 'name': 'fake'}
    self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                             network_info, mox.IgnoreArg(),
                             mox.IsA(objects.ImageMeta),
                             rescue=mox.IgnoreArg(),
                             write_to_disk=mox.IgnoreArg()
                             ).AndReturn(dummyxml)
    self.drvr._destroy(instance)
    self.drvr._create_domain(mox.IgnoreArg())
    self.mox.ReplayAll()
    rescue_password = 'fake_password'
    self.drvr.rescue(self.context, instance,
                     network_info, image_meta, rescue_password)
    self.mox.VerifyAll()
@mock.patch.object(libvirt_utils, 'get_instance_path')
@mock.patch.object(libvirt_utils, 'load_file')
@mock.patch.object(host.Host, "get_domain")
def test_unrescue(self, mock_get_domain, mock_load_file,
                  mock_get_instance_path):
    """unrescue restores the saved XML and removes rescue artifacts.

    Verifies the domain is destroyed and recreated from 'unrescue.xml',
    that the xml file plus every 'rescue.file' glob match are deleted,
    and that rescue LVM volumes are removed.
    """
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='block' device='disk'>"
                "<source dev='/dev/some-vg/some-lv'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "</devices></domain>")
    mock_get_instance_path.return_value = '/path'
    # Fixed fixture typo: 'fake=uuid' -> 'fake-uuid', consistent with
    # every other test in this file (the value only flows through the
    # mocked get_instance_path, so the test behavior is unchanged).
    instance = objects.Instance(uuid='fake-uuid', id=1)
    fake_dom = FakeVirtDomain(fake_xml=dummyxml)
    mock_get_domain.return_value = fake_dom
    mock_load_file.return_value = "fake_unrescue_xml"
    unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
    rescue_file = os.path.join('/path', 'rescue.file')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with test.nested(
            mock.patch.object(drvr, '_destroy'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(libvirt_utils, 'file_delete'),
            mock.patch.object(drvr, '_lvm_disks',
                              return_value=['lvm.rescue']),
            mock.patch.object(lvm, 'remove_volumes'),
            mock.patch.object(glob, 'iglob', return_value=[rescue_file])
            ) as (mock_destroy, mock_create, mock_del, mock_lvm_disks,
                  mock_remove_volumes, mock_glob):
        drvr.unrescue(instance, None)
        mock_destroy.assert_called_once_with(instance)
        mock_create.assert_called_once_with("fake_unrescue_xml",
                                            fake_dom)
        # Both the unrescue.xml and the globbed rescue file are deleted.
        self.assertEqual(2, mock_del.call_count)
        self.assertEqual(unrescue_xml_path,
                         mock_del.call_args_list[0][0][0])
        self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
        mock_remove_volumes.assert_called_once_with(['lvm.rescue'])
@mock.patch(
    'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
def test_rescue_config_drive(self, mock_make, mock_add):
    """rescue also rebuilds the config drive when one is attached."""
    instance = self._create_instance()
    uuid = instance.uuid
    configdrive_path = uuid + '/disk.config.rescue'
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    network_info = _fake_network_info(self.stubs, 1)
    self.mox.StubOutWithMock(self.drvr,
                             '_get_existing_domain_xml')
    self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
    self.mox.StubOutWithMock(imagebackend.Backend, 'image')
    self.mox.StubOutWithMock(imagebackend.Image, 'cache')
    self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                             '__init__')
    self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
    self.mox.StubOutWithMock(self.drvr, '_destroy')
    self.mox.StubOutWithMock(self.drvr, '_create_domain')
    self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                mox.IgnoreArg())
    # Same rescue images as test_rescue, plus disk.config.rescue.
    imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw'
                               ).AndReturn(fake_imagebackend.Raw())
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             user_id=mox.IgnoreArg()).MultipleTimes()
    imagebackend.Image.cache(context=mox.IgnoreArg(),
                             fetch_func=mox.IgnoreArg(),
                             filename=mox.IgnoreArg(),
                             image_id=mox.IgnoreArg(),
                             project_id=mox.IgnoreArg(),
                             size=None, user_id=mox.IgnoreArg())
    instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
                                                content=mox.IgnoreArg(),
                                                extra_md=mox.IgnoreArg(),
                                                network_info=mox.IgnoreArg())
    image_meta = {'id': 'fake', 'name': 'fake'}
    self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                             network_info, mox.IgnoreArg(),
                             mox.IsA(objects.ImageMeta),
                             rescue=mox.IgnoreArg(),
                             write_to_disk=mox.IgnoreArg()
                             ).AndReturn(dummyxml)
    self.drvr._destroy(instance)
    self.drvr._create_domain(mox.IgnoreArg())
    self.mox.ReplayAll()
    rescue_password = 'fake_password'
    self.drvr.rescue(self.context, instance, network_info,
                     image_meta, rescue_password)
    self.mox.VerifyAll()
    # The config drive must be rebuilt at the rescue path.
    mock_add.assert_any_call(mock.ANY)
    expected_call = [mock.call(os.path.join(CONF.instances_path,
                                            configdrive_path))]
    mock_make.assert_has_calls(expected_call)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files(self, get_instance_path, exists, exe,
                               shutil):
    """Happy path: move the dir to '<path>_del' then remove it."""
    # NOTE: the 'shutil' parameter is the mock for shutil.rmtree
    # (patched above); it shadows the module name inside this test.
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    exists.side_effect = [False, False, True, False]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('os.kill')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_kill_running(
        self, get_instance_path, kill, exists, exe, shutil):
    """Tracked jobs for the instance are SIGKILLed before deletion."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    # Pretend two processes (pids 3 and 4) are operating on the files.
    self.drvr.job_tracker.jobs[instance.uuid] = [3, 4]
    exists.side_effect = [False, False, True, False]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    # Each pid gets SIGKILL, then signal 0 to confirm it is gone.
    kill.assert_has_calls([mock.call(3, signal.SIGKILL), mock.call(3, 0),
                           mock.call(4, signal.SIGKILL), mock.call(4, 0)])
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
    self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resize(self, get_instance_path, exists,
                                      exe, shutil):
    """If moving '/path' fails, the '_resize' backup is moved instead."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    # nova.utils.execute is the same mock object as 'exe' here, since
    # it was patched by the decorator above.
    nova.utils.execute.side_effect = [Exception(), None]
    exists.side_effect = [False, False, True, False]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')]
    self.assertEqual(expected, exe.mock_calls)
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)

@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
                                      shutil):
    """Deletion reports failure when '<path>_del' still exists after."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    exists.side_effect = [False, False, True, True]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    exe.assert_called_with('mv', '/path', '/path_del')
    shutil.assert_called_with('/path_del')
    self.assertFalse(result)

@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
                                         exe, shutil):
    """Both mv attempts failing twice leaves the files and reports False."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [True, True]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    # The mv pair is retried once, hence the '* 2'.
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertFalse(result)

@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resume(self, get_instance_path, exists,
                                      exe, shutil):
    """A previously-moved '_del' dir is still cleaned up on retry."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [False, False, True, False]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertTrue(result)

@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_none(self, get_instance_path, exists,
                                    exe, shutil):
    """Nothing to delete counts as success; rmtree is never called."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    nova.utils.execute.side_effect = Exception()
    exists.side_effect = [False, False, False, False]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')] * 2
    self.assertEqual(expected, exe.mock_calls)
    self.assertEqual(0, len(shutil.mock_calls))
    self.assertTrue(result)

@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_concurrent(self, get_instance_path, exists,
                                          exe, shutil):
    """Concurrent deletion: the retry of the first mv succeeds."""
    get_instance_path.return_value = '/path'
    instance = objects.Instance(uuid='fake-uuid', id=1)
    nova.utils.execute.side_effect = [Exception(), Exception(), None]
    exists.side_effect = [False, False, True, False]
    result = self.drvr.delete_instance_files(instance)
    get_instance_path.assert_called_with(instance)
    expected = [mock.call('mv', '/path', '/path_del'),
                mock.call('mv', '/path_resize', '/path_del')]
    # Third call is the retried first mv.
    expected.append(expected[0])
    self.assertEqual(expected, exe.mock_calls)
    shutil.assert_called_with('/path_del')
    self.assertTrue(result)
def _assert_on_id_map(self, idmap, klass, start, target, count):
    """Assert an idmap object's type and start/target/count fields."""
    self.assertIsInstance(idmap, klass)
    self.assertEqual(start, idmap.start)
    self.assertEqual(target, idmap.target)
    self.assertEqual(count, idmap.count)
def test_get_id_maps(self):
    """uid_maps/gid_maps config turns into UIDMap/GIDMap objects (lxc)."""
    self.flags(virt_type="lxc", group="libvirt")
    # NOTE(review): assigning CONF attributes directly bypasses the
    # test fixture's override cleanup (self.flags above already sets
    # virt_type) -- confirm whether these should use self.flags too.
    CONF.libvirt.virt_type = "lxc"
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
    idmaps = self.drvr._get_guest_idmaps()
    self.assertEqual(len(idmaps), 4)
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestUIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestUIDMap,
                           1, 20000, 10)
    self._assert_on_id_map(idmaps[2],
                           vconfig.LibvirtConfigGuestGIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[3],
                           vconfig.LibvirtConfigGuestGIDMap,
                           1, 20000, 10)

def test_get_id_maps_not_lxc(self):
    """id maps only apply to the lxc virt type."""
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
    idmaps = self.drvr._get_guest_idmaps()
    self.assertEqual(0, len(idmaps))

def test_get_id_maps_only_uid(self):
    """Only uid maps configured yields only UIDMap objects."""
    self.flags(virt_type="lxc", group="libvirt")
    CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
    CONF.libvirt.gid_maps = []
    idmaps = self.drvr._get_guest_idmaps()
    self.assertEqual(2, len(idmaps))
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestUIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestUIDMap,
                           1, 20000, 10)

def test_get_id_maps_only_gid(self):
    """Only gid maps configured yields only GIDMap objects."""
    self.flags(virt_type="lxc", group="libvirt")
    CONF.libvirt.uid_maps = []
    CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
    idmaps = self.drvr._get_guest_idmaps()
    self.assertEqual(2, len(idmaps))
    self._assert_on_id_map(idmaps[0],
                           vconfig.LibvirtConfigGuestGIDMap,
                           0, 10000, 1)
    self._assert_on_id_map(idmaps[1],
                           vconfig.LibvirtConfigGuestGIDMap,
                           1, 20000, 10)
def test_instance_on_disk(self):
    """Locally file-backed instances are not visible on shared storage."""
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    inst = objects.Instance(uuid='fake-uuid', id=1)
    self.assertFalse(driver.instance_on_disk(inst))

def test_instance_on_disk_rbd(self):
    """RBD-backed instances are reachable from any host."""
    self.flags(images_type='rbd', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    inst = objects.Instance(uuid='fake-uuid', id=1)
    self.assertTrue(driver.instance_on_disk(inst))
def test_get_interfaces(self):
    """_get_interfaces lists target dev names from the domain XML."""
    dom_xml = """
        <domain type="qemu">
            <devices>
                <interface type="ethernet">
                    <mac address="fe:eb:da:ed:ef:ac"/>
                    <model type="virtio"/>
                    <target dev="eth0"/>
                </interface>
                <interface type="bridge">
                    <mac address="ca:fe:de:ad:be:ef"/>
                    <model type="virtio"/>
                    <target dev="br0"/>
                </interface>
            </devices>
        </domain>"""
    list_interfaces = ['eth0', 'br0']
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.assertEqual(list_interfaces, drv._get_interfaces(dom_xml))
def test_get_disk_xml(self):
    """Guest.get_disk returns the disk config for a target dev, or None."""
    dom_xml = """
        <domain type="kvm">
          <devices>
            <disk type="file">
               <source file="disk1_file"/>
               <target dev="vda" bus="virtio"/>
               <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
            </disk>
            <disk type="block">
              <source dev="/path/to/dev/1"/>
              <target dev="vdb" bus="virtio" serial="1234"/>
            </disk>
          </devices>
        </domain>
        """
    # NOTE(review): the expected strings must match to_xml()'s
    # pretty-printed indentation exactly (only outer whitespace is
    # stripped below) -- confirm the 2-space indent against to_xml().
    diska_xml = """<disk type="file" device="disk">
  <source file="disk1_file"/>
  <target bus="virtio" dev="vda"/>
  <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""
    diskb_xml = """<disk type="block" device="disk">
  <source dev="/path/to/dev/1"/>
  <target bus="virtio" dev="vdb"/>
</disk>"""
    dom = mock.MagicMock()
    dom.XMLDesc.return_value = dom_xml
    guest = libvirt_guest.Guest(dom)
    # NOTE(gcb): etree.tostring(node) returns an extra line with
    # some white spaces, need to strip it.
    actual_diska_xml = guest.get_disk('vda').to_xml()
    self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())
    actual_diskb_xml = guest.get_disk('vdb').to_xml()
    self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())
    self.assertIsNone(guest.get_disk('vdc'))
def test_vcpu_model_from_config(self):
    """_cpu_config_to_vcpu_model maps guest CPU config to VirtCPUModel."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # A missing config yields no model.
    vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
    self.assertIsNone(vcpu_model)
    cpu = vconfig.LibvirtConfigGuestCPU()
    feature1 = vconfig.LibvirtConfigGuestCPUFeature()
    feature2 = vconfig.LibvirtConfigGuestCPUFeature()
    feature1.name = 'sse'
    feature1.policy = cpumodel.POLICY_REQUIRE
    feature2.name = 'aes'
    feature2.policy = cpumodel.POLICY_REQUIRE
    cpu.features = set([feature1, feature2])
    cpu.mode = cpumodel.MODE_CUSTOM
    cpu.sockets = 1
    cpu.cores = 2
    cpu.threads = 4
    vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
    self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match)
    self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode)
    self.assertEqual(4, vcpu_model.topology.threads)
    self.assertEqual(set(['sse', 'aes']),
                     set([f.name for f in vcpu_model.features]))
    # Passing an existing model updates it in place and returns it.
    cpu.mode = cpumodel.MODE_HOST_MODEL
    vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
    self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode)
    self.assertEqual(vcpu_model, vcpu_model_1)
@mock.patch.object(lvm, 'get_volume_size', return_value=10)
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(dmcrypt, 'delete_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
@mock.patch.object(objects.Instance, 'save')
def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain,
                               mock_unfilter, mock_delete_volume,
                               mock_get_guest, mock_get_size):
    """cleanup() deletes dmcrypt devices backing an encrypted LVM guest."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance = objects.Instance(uuid='fake-uuid', id=1,
                                ephemeral_key_uuid='000-000-000')
    instance.system_metadata = {}
    block_device_info = {'root_device_name': '/dev/vda',
                         'ephemerals': [],
                         'block_device_mapping': []}
    self.flags(images_type="lvm",
               group='libvirt')
    # The '-dmcrypt' suffix marks the disk as encrypted.
    dom_xml = """
        <domain type="kvm">
          <devices>
            <disk type="block">
              <driver name='qemu' type='raw' cache='none'/>
              <source dev="/dev/mapper/fake-dmcrypt"/>
              <target dev="vda" bus="virtio" serial="1234"/>
            </disk>
          </devices>
        </domain>
        """
    dom = mock.MagicMock()
    dom.XMLDesc.return_value = dom_xml
    guest = libvirt_guest.Guest(dom)
    mock_get_guest.return_value = guest
    drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False,
                block_device_info=block_device_info)
    mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt')
@mock.patch.object(lvm, 'get_volume_size', return_value=10)
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(dmcrypt, 'delete_volume')
def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest, mock_size,
                      encrypted=False):
    """_cleanup_lvm removes dmcrypt devices only for encrypted disks.

    :param encrypted: whether the disk source carries the '-dmcrypt'
        suffix that marks it as an encrypted volume.
    """
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance = objects.Instance(uuid='fake-uuid', id=1,
                                ephemeral_key_uuid='000-000-000')
    block_device_info = {'root_device_name': '/dev/vda',
                         'ephemerals': [],
                         'block_device_mapping': []}
    dev_name = 'fake-dmcrypt' if encrypted else 'fake'
    dom_xml = """
        <domain type="kvm">
          <devices>
            <disk type="block">
              <driver name='qemu' type='raw' cache='none'/>
              <source dev="/dev/mapper/%s"/>
              <target dev="vda" bus="virtio" serial="1234"/>
            </disk>
          </devices>
        </domain>
        """ % dev_name
    dom = mock.MagicMock()
    dom.XMLDesc.return_value = dom_xml
    guest = libvirt_guest.Guest(dom)
    mock_get_guest.return_value = guest
    drv._cleanup_lvm(instance, block_device_info)
    if encrypted:
        mock_delete_volume.assert_called_once_with(
            '/dev/mapper/fake-dmcrypt')
    else:
        self.assertFalse(mock_delete_volume.called)

def test_cleanup_lvm(self):
    self._test_cleanup_lvm()

def test_cleanup_encrypted_lvm(self):
    self._test_cleanup_lvm(encrypted=True)
def test_vcpu_model_to_config(self):
    """_vcpu_model_to_cpu_config converts a VirtCPUModel back to config."""
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE,
                                     name='sse')
    feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID,
                                       name='aes')
    topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
    vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL,
                                      features=[feature, feature_1],
                                      topology=topo)
    cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
    self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode)
    self.assertEqual(1, cpu.sockets)
    self.assertEqual(4, cpu.threads)
    self.assertEqual(2, len(cpu.features))
    self.assertEqual(set(['sse', 'aes']),
                     set([f.name for f in cpu.features]))
    self.assertEqual(set([cpumodel.POLICY_REQUIRE,
                          cpumodel.POLICY_FORBID]),
                     set([f.policy for f in cpu.features]))
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )
        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        """Per-volume stats are gathered via block_stats for each bdm."""
        def fake_block_stats(instance_name, disk):
            # (rd_req, rd_bytes, wr_req, wr_bytes, errs)
            return (169, 688640, 0, 0, -1)
        self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0},
                          {'volume': 2,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0}]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        """A vanished domain yields an empty usage list, not an error."""
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        # Use libvirt's built-in "test" driver so no real libvirtd is needed.
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840: serializing the driver connection must not blow up.
        import nova.virt.libvirt.driver as libvirt_driver
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        jsonutils.to_primitive(drvr._conn, convert_instances=True)

    def test_tpool_execute_calls_libvirt(self):
        """Verify connection setup is routed through eventlet.tpool.execute.

        All potentially blocking libvirt calls (openAuth, event
        registration, close-callback registration) must be dispatched via
        the thread pool so they do not block the event loop.
        """
        conn = fakelibvirt.virConnect()
        conn.is_expected = True
        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            fakelibvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        # Older libvirt-python bindings lack registerCloseCallback.
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        # assertTrue is the idiomatic unittest check; was
        # assertEqual(True, c.is_expected).
        self.assertTrue(c.is_expected)
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
    """Tests for libvirtDriver.volume_snapshot_create/delete."""

    def setUp(self):
        super(LibvirtVolumeSnapshotTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')
        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'
        # create domain info
        # Fixture: file-backed vda plus block-backed vdb; vda carries the
        # serial matching self.volume_uuid below.
        self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
        # alternate domain info with network-backed snapshot chain
        # (gluster vdb with a two-deep backingStore chain).
        self.dom_netdisk_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/root.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='1'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/snap.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='2'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/snap-b.img'>
<host name='server1' port='24007'/>
</source>
<backingStore/>
</backingStore>
</backingStore>
<target dev='vdb' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
</devices>
</domain>
"""
        # XML with netdisk attached, and 1 snapshot taken
        self.dom_netdisk_xml_2 = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/snap.img'>
<host name='server1' port='24007'/>
</source>
<backingStore type='network' index='1'>
<driver name='qemu' type='qcow2'/>
<source protocol='gluster' name='vol1/root.img'>
<host name='server1' port='24007'/>
</source>
<backingStore/>
</backingStore>
<target dev='vdb' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
</devices>
</domain>
"""
        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}
        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'
        # delete_info fixtures: _1 deletes the newest snapshot (rebase),
        # _2 merges into another snapshot (commit), _3 has no merge files
        # (rebase to a null base), _netdisk commits snap.img into root.img.
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}
        self.delete_info_3 = {'type': 'qcow2',
                              'file_to_merge': None,
                              'merge_target_file': None}
        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}
        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}

    def tearDown(self):
        # No extra cleanup needed beyond the base class; kept for symmetry.
        super(LibvirtVolumeSnapshotTestCase, self).tearDown()

    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_id')
    def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
                                            mock_refresh_connection_info):
        # The BDM is looked up by volume id and its connection info
        # refreshed through the driver's volume API.
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        mock_get_by_volume_id.return_value = fake_bdm
        self.drvr._volume_refresh_connection_info(self.c, self.inst,
                                                  self.volume_uuid)
        mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
        mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
            self.drvr._volume_api, self.drvr)

    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk."""
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        instance = objects.Instance(**self.inst)
        new_file = 'new-file'
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
        # Expected snapshot XML: external snapshot of vda only; vdb is
        # excluded via snapshot="no".
        snap_xml_src = (
           '<domainsnapshot>\n'
           ' <disks>\n'
           ' <disk name="disk1_file" snapshot="external" type="file">\n'
           ' <source file="new-file"/>\n'
           ' </disk>\n'
           ' <disk name="vdb" snapshot="no"/>\n'
           ' </disks>\n'
           '</domainsnapshot>\n')
        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
        # With quiesce the first (quiesced) attempt succeeds; without it
        # the quiesced attempt raises and the code retries unquiesced.
        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(fakelibvirt.libvirtError(
                    'quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)
        self.mox.VerifyAll()

    def test_volume_snapshot_create_libgfapi(self, quiesce=True):
        """Test snapshot creation with libgfapi network disk."""
        # NOTE(review): PEP8 E251 — no spaces around '=' for keyword args;
        # left untouched here to keep the code byte-identical.
        self.flags(instance_name_template = 'instance-%s')
        self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        # Override the fixture: vdb is a gluster network disk here.
        self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source protocol='gluster' name='gluster1/volume-1234'>
<host name='127.3.4.5' port='24007'/>
</source>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
        instance = objects.Instance(**self.inst)
        new_file = 'new-file'
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
        snap_xml_src = (
           '<domainsnapshot>\n'
           ' <disks>\n'
           ' <disk name="disk1_file" snapshot="external" type="file">\n'
           ' <source file="new-file"/>\n'
           ' </disk>\n'
           ' <disk name="vdb" snapshot="no"/>\n'
           ' </disks>\n'
           '</domainsnapshot>\n')
        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(fakelibvirt.libvirtError(
                    'quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)
        self.mox.VerifyAll()

    def test_volume_snapshot_create_noquiesce(self):
        # Re-runs the create test exercising the quiesce-failure fallback.
        self.test_volume_snapshot_create(quiesce=False)

    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_can_quiesce(self, ver):
        # kvm + guest agent enabled + new enough libvirt => quiesce allowed.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.inst)
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {
                "hw_qemu_guest_agent": "yes"}})
        self.assertIsNone(self.drvr._can_quiesce(instance, image_meta))

    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_can_quiesce_bad_hyp(self, ver):
        # Unsupported hypervisor type => InstanceQuiesceNotSupported.
        self.flags(virt_type='xxx', group='libvirt')
        instance = objects.Instance(**self.inst)
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {
                "hw_qemu_guest_agent": "yes"}})
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          self.drvr._can_quiesce, instance, image_meta)

    @mock.patch.object(host.Host,
                       'has_min_version', return_value=False)
    def test_can_quiesce_bad_ver(self, ver):
        # Too-old libvirt => InstanceQuiesceNotSupported.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.inst)
        image_meta = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          self.drvr._can_quiesce, instance, image_meta)

    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_can_quiesce_agent_not_enable(self, ver):
        # Guest agent not enabled in the image => QemuGuestAgentNotEnabled.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.inst)
        image_meta = objects.ImageMeta.from_dict({})
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          self.drvr._can_quiesce, instance, image_meta)

    def test_volume_snapshot_create_outer_success(self):
        # volume_snapshot_create: on success the snapshot status goes to
        # 'creating' and connection info is refreshed.
        instance = objects.Instance(**self.inst)
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file'])
        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'creating')
        self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot')
        self.drvr._volume_api.get_snapshot(self.c,
            self.create_info['snapshot_id']).AndReturn({'status': 'available'})
        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)
        self.mox.ReplayAll()
        self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid,
                                         self.create_info)

    def test_volume_snapshot_create_outer_failure(self):
        # On inner failure the snapshot status is set to 'error' and the
        # exception propagates.
        instance = objects.Instance(**self.inst)
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file']).\
            AndRaise(exception.NovaException('oops'))
        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'error')
        self.mox.ReplayAll()
        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_create,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.create_info)

    def test_volume_snapshot_delete_1(self):
        """Deleting newest snapshot -- blockRebase."""
        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        domain.blockRebase('vda', 'snap.img', 0, flags=0)
        # Two polls: job in progress, then complete.
        domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)
        self.mox.VerifyAll()
        # Restore the flag removed at the top of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})

    def test_volume_snapshot_delete_relative_1(self):
        """Deleting newest snapshot -- blockRebase using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_guest')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_guest(instance).AndReturn(guest)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        domain.blockRebase('vda', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
        domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)
        self.mox.VerifyAll()

    def test_volume_snapshot_delete_2(self):
        """Deleting older snapshot -- blockCommit."""
        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        self.mox.ReplayAll()
        # Without the relative-commit flag the delete must be refused.
        self.assertRaises(exception.Invalid,
                          self.drvr._volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_2)
        # Restore the flag removed at the top of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})

    def test_volume_snapshot_delete_relative_2(self):
        """Deleting older snapshot -- blockCommit using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
        domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
        # An empty job-info dict also terminates the wait loop.
        domain.blockJobInfo('vda', flags=0).AndReturn({})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)
        self.mox.VerifyAll()

    def test_volume_snapshot_delete_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a volume
        # with blockRebase. So base of the new image will be null.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)
        with test.nested(
            mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(self.drvr._host, 'has_min_version',
                              return_value=True),
            mock.patch.object(domain, 'blockRebase'),
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={'cur': 1000, 'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_has_min_version,
              mock_rebase, mock_job_info):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)
            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            mock_has_min_version.assert_called_once_with((1, 1, 1,))
            mock_rebase.assert_called_once_with('vda', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vda', flags=0)

    def test_volume_snapshot_delete_netdisk_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a network attached volume
        # with blockRebase. So base of the new image will be null.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2)
        guest = libvirt_guest.Guest(domain)
        with test.nested(
            mock.patch.object(domain, 'XMLDesc',
                              return_value=self.dom_netdisk_xml_2),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(self.drvr._host, 'has_min_version',
                              return_value=True),
            mock.patch.object(domain, 'blockRebase'),
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={'cur': 1000, 'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_has_min_version,
              mock_rebase, mock_job_info):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)
            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            mock_has_min_version.assert_called_once_with((1, 1, 1,))
            mock_rebase.assert_called_once_with('vdb', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vdb', flags=0)

    def test_volume_snapshot_delete_outer_success(self):
        # volume_snapshot_delete: on success status goes to 'deleting' and
        # connection info is refreshed.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')
        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')
        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)
        self.mox.ReplayAll()
        self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)
        self.mox.VerifyAll()

    def test_volume_snapshot_delete_outer_failure(self):
        # On inner failure the snapshot status is set to 'error_deleting'
        # and the exception propagates.
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'
        FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')
        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')
        self.mox.ReplayAll()
        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)
        self.mox.VerifyAll()

    def test_volume_snapshot_delete_invalid_type(self):
        # An unknown delete_info['type'] is rejected and the snapshot is
        # marked 'error_deleting'.
        instance = objects.Instance(**self.inst)
        FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        self.drvr._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')
        self.mox.ReplayAll()
        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)

    def test_volume_snapshot_delete_netdisk_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        # Network disks are addressed by backing-chain index ('vdb[1]').
        domain.blockRebase('vdb', 'vdb[1]', 0, flags=0)
        domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)
        self.mox.VerifyAll()
        # Restore the flag removed at the top of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})

    def test_volume_snapshot_delete_netdisk_relative_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        domain.blockRebase('vdb', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
        domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)
        self.mox.VerifyAll()

    def test_volume_snapshot_delete_netdisk_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        self.mox.ReplayAll()
        # Without the relative-commit flag the delete must be refused.
        self.assertRaises(exception.Invalid,
                          self.drvr._volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_netdisk)
        # Restore the flag removed at the top of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})

    def test_volume_snapshot_delete_netdisk_relative_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        # Commit the top of the chain ('vdb[0]') into its backing file.
        domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
        domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id,
                                          self.delete_info_netdisk)
        self.mox.VerifyAll()
def _fake_convert_image(source, dest, out_format,
                        run_as_root=True):
    # Stand-in for images.convert_image used by the snapshot tests:
    # instead of running qemu-img, just record that *dest* now exists
    # in the fake libvirt_utils filesystem.
    libvirt_driver.libvirt_utils.files[dest] = ''
class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixture and assertions for the snapshot test classes below."""

    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./', group='libvirt')
        self.context = context.get_admin_context()
        # Swap the real libvirt_utils for the fake in both modules that
        # import it, so snapshots operate on in-memory "files".
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self.stubs)
        self.mock_update_task_state = mock.Mock()
        test_instance = _create_test_instance()
        self.instance_ref = objects.Instance(**test_instance)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        # The snapshot flow must report PENDING_UPLOAD then UPLOADING.
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)])
        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')
        if expected_properties:
            for expected_key, expected_value in \
                    six.iteritems(expected_properties):
                self.assertEqual(expected_value, props[expected_key])

    def _create_image(self, extra_properties=None):
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)
        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta

    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve):
        # Run the driver snapshot and return the resulting image record.
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)
        snapshot = self.image_service.show(self.context, image_id)
        return snapshot

    def _test_snapshot(self, disk_format, extra_properties=None):
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)
class LibvirtSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests against the default (file-backed) image backend."""

    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = \
            utils.get_system_metadata_from_image({'disk_format': 'ami'})
        self._test_snapshot(disk_format='ami')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')

    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')

    def test_no_image_architecture(self):
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')

    def test_no_original_image(self):
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')

    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        props = {'architecture': 'fake_arch',
                 'key_a': 'value_a',
                 'key_b': 'value_b',
                 'os_type': 'linux'}
        self._test_snapshot(disk_format='qcow2',
                            extra_properties=props)
class LXCSnapshotTests(LibvirtSnapshotTests):
    """Repeat all of the Libvirt snapshot tests, but with LXC enabled"""

    def setUp(self):
        super(LXCSnapshotTests, self).setUp()
        # Only the virt_type differs; every inherited test re-runs as-is.
        self.flags(virt_type='lxc', group='libvirt')
class LVMSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for the LVM image backend."""

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
    def _test_lvm_snapshot(self, disk_format, mock_volume_info,
                           mock_convert_image):
        self.flags(images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')
        self._test_snapshot(disk_format=disk_format)
        # The LVM path must be inspected and converted exactly once.
        mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')])
        mock_convert_image.assert_called_once_with(
            '/dev/nova-vg/lv', mock.ANY, disk_format, run_as_root=True)

    def test_raw(self):
        self._test_lvm_snapshot('raw')

    def test_qcow2(self):
        self.flags(snapshot_image_format='qcow2', group='libvirt')
        self._test_lvm_snapshot('qcow2')
|
{
"content_hash": "7da56c7685bcc6e09c78573ee26bf3b4",
"timestamp": "",
"source": "github",
"line_count": 15131,
"max_line_length": 97,
"avg_line_length": 44.57471416297667,
"alnum_prop": 0.5434495744743943,
"repo_name": "raildo/nova",
"id": "8c922b4a5b70878a70411ac11b5e20e06d50fd9a",
"size": "675116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16814792"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "351433"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
"""
record of files
naming for same name files: file.gif, file-1.gif, file-2.gif etc
"""
import frappe, frappe.utils
from frappe.utils.file_manager import delete_file_data_content, get_content_hash, get_random_filename
from frappe import _
from frappe.utils.nestedset import NestedSet
from frappe.utils import strip
import json
import urllib
from PIL import Image, ImageOps
import os
import requests
import requests.exceptions
import StringIO
import mimetypes, imghdr
from frappe.utils import get_files_path
# raised when trying to delete a folder that still contains files
# (see File.check_folder_is_empty)
class FolderNotEmpty(frappe.ValidationError): pass

# NOTE(review): presumably tells frappe's "Linked With" report to skip this
# doctype — confirm against frappe's linked_with handling
exclude_from_linked_with = True
class File(NestedSet):
    """Controller for ``File`` documents.

    Files and folders form a tree (``NestedSet``) linked by ``folder``;
    folder documents carry an aggregate ``file_size`` of their contents.
    Plain files get random names; folders are named by their path.
    """
    nsm_parent_field = 'folder'
    no_feed_on_delete = True

    def before_insert(self):
        # register for on_rollback() so a failed transaction can remove the
        # file content that was written to disk
        frappe.local.rollback_observers.append(self)
        self.set_folder_name()
        self.set_name()

    def get_name_based_on_parent_folder(self):
        """Return a '/'-joined path of ancestor folder names plus file_name."""
        path = get_breadcrumbs(self.folder)
        folder_name = frappe.get_value("File", self.folder, "file_name")
        return "/".join([d.file_name for d in path] + [folder_name, self.file_name])

    def set_name(self):
        """Set name for folder"""
        if self.is_folder:
            if self.folder:
                self.name = self.get_name_based_on_parent_folder()
            else:
                # home
                self.name = self.file_name
        else:
            # plain files get a short random name
            self.name = frappe.generate_hash("", 10)

    def after_insert(self):
        self.update_parent_folder_size()

    def after_rename(self, olddn, newdn, merge=False):
        # keep derived child names in sync with this folder's new name
        for successor in self.get_successor():
            setup_folder_path(successor, self.name)

    def get_successor(self):
        """Return names of all direct children of this folder."""
        # parameterized query: the previous string interpolation of
        # self.name into the SQL was open to SQL injection
        return frappe.db.sql_list("select name from tabFile where folder=%s",
            (self.name,)) or []

    def validate(self):
        if self.is_new():
            self.validate_duplicate_entry()
        self.validate_folder()
        if not self.flags.ignore_file_validate:
            self.validate_file()
        self.generate_content_hash()
        self.set_folder_size()

    def set_folder_size(self):
        """Set folder size if folder"""
        if self.is_folder and not self.is_new():
            self.file_size = self.get_folder_size()
            frappe.db.set_value("File", self.name, "file_size", self.file_size)
            # ancestors must also reflect the new total
            for folder in self.get_ancestors():
                frappe.db.set_value("File", folder, "file_size", self.get_folder_size(folder))

    def get_folder_size(self, folder=None):
        """Returns folder size for current folder"""
        if not folder:
            folder = self.name
        file_size = frappe.db.sql("""select sum(ifnull(file_size,0))
            from tabFile where folder=%s """, (folder))[0][0]
        return file_size

    def update_parent_folder_size(self):
        """Update size of parent folder"""
        if self.folder and not self.is_folder: # it not home
            # saving the parent re-runs its set_folder_size()
            frappe.get_doc("File", self.folder).save(ignore_permissions=True)

    def set_folder_name(self):
        """Make parent folders if not exists based on reference doctype and name"""
        if self.attached_to_doctype and not self.folder:
            self.folder = frappe.db.get_value("File", {"is_attachments_folder": 1})

    def validate_folder(self):
        if not self.is_home_folder and not self.folder and \
            not self.flags.ignore_folder_validate:
            frappe.throw(_("Folder is mandatory"))

    def validate_file(self):
        """Validates existence of public file
        TODO: validate for private file
        """
        if (self.file_url or "").startswith("/files/"):
            if not self.file_name:
                self.file_name = self.file_url.split("/files/")[-1]
            if not os.path.exists(get_files_path(self.file_name.lstrip("/"))):
                frappe.throw(_("File {0} does not exist").format(self.file_url), IOError)

    def validate_duplicate_entry(self):
        if not self.flags.ignore_duplicate_entry_error and not self.is_folder:
            # check duplicate name
            # check duplicate assignment
            n_records = frappe.db.sql("""select name from `tabFile`
                where content_hash=%s
                and name!=%s
                and attached_to_doctype=%s
                and attached_to_name=%s""", (self.content_hash, self.name,
                    self.attached_to_doctype, self.attached_to_name))
            if len(n_records) > 0:
                self.duplicate_entry = n_records[0][0]
                frappe.throw(frappe._("Same file has already been attached to the record"), frappe.DuplicateEntryError)

    def generate_content_hash(self):
        if self.content_hash or not self.file_url:
            return
        if self.file_url.startswith("/files/"):
            try:
                with open(get_files_path(self.file_name.lstrip("/")), "r") as f:
                    self.content_hash = get_content_hash(f.read())
            except IOError:
                frappe.msgprint(_("File {0} does not exist").format(self.file_url))
                raise

    def on_trash(self):
        if self.is_home_folder or self.is_attachments_folder:
            frappe.throw(_("Cannot delete Home and Attachments folders"))
        self.check_folder_is_empty()
        self.check_reference_doc_permission()
        super(File, self).on_trash()
        self.delete_file()

    def make_thumbnail(self):
        """Create a 300x300 thumbnail next to the file; returns its URL."""
        if self.file_url:
            if self.file_url.startswith("/files"):
                try:
                    image, filename, extn = get_local_image(self.file_url)
                except IOError:
                    return
            else:
                try:
                    image, filename, extn = get_web_image(self.file_url)
                except (requests.exceptions.HTTPError, requests.exceptions.SSLError, IOError):
                    return
            thumbnail = ImageOps.fit(
                image,
                (300, 300),
                Image.ANTIALIAS
            )
            thumbnail_url = filename + "_small." + extn
            path = os.path.abspath(frappe.get_site_path("public", thumbnail_url.lstrip("/")))
            try:
                thumbnail.save(path)
                self.db_set("thumbnail_url", thumbnail_url)
            except IOError:
                frappe.msgprint("Unable to write file format for {0}".format(path))
                return
            return thumbnail_url

    def after_delete(self):
        self.update_parent_folder_size()

    def check_folder_is_empty(self):
        """Throw exception if folder is not empty"""
        files = frappe.get_all("File", filters={"folder": self.name}, fields=("name", "file_name"))
        if self.is_folder and files:
            frappe.throw(_("Folder {0} is not empty").format(self.name), FolderNotEmpty)

    def check_reference_doc_permission(self):
        """Check if permission exists for reference document"""
        if self.attached_to_name:
            # check permission
            try:
                if not self.flags.ignore_permissions and \
                    not frappe.has_permission(self.attached_to_doctype,
                        "write", self.attached_to_name):
                    frappe.throw(frappe._("No permission to write / remove."),
                        frappe.PermissionError)
            except frappe.DoesNotExistError:
                # reference document is already gone; nothing to protect
                pass

    def delete_file(self):
        """If file not attached to any other record, delete it"""
        if self.file_name and self.content_hash and (not frappe.db.count("File",
            {"content_hash": self.content_hash, "name": ["!=", self.name]})):
            delete_file_data_content(self)
        elif self.file_url:
            # content is shared with another File; only remove the thumbnail
            delete_file_data_content(self, only_thumbnail=True)

    def on_rollback(self):
        self.flags.on_rollback = True
        self.on_trash()
def on_doctype_update():
    """Add a composite index to speed up attachment lookups by reference doc."""
    frappe.db.add_index("File", ["attached_to_doctype", "attached_to_name"])
def make_home_folder():
    """Create the root Home folder and its Attachments subfolder."""
    home_fields = {
        "doctype": "File",
        "is_folder": 1,
        "is_home_folder": 1,
        "file_name": _("Home"),
    }
    home = frappe.get_doc(home_fields).insert()

    # attachments live directly under Home (see File.set_folder_name)
    attachments_fields = {
        "doctype": "File",
        "folder": home.name,
        "is_folder": 1,
        "is_attachments_folder": 1,
        "file_name": _("Attachments"),
    }
    frappe.get_doc(attachments_fields).insert()
@frappe.whitelist()
def get_breadcrumbs(folder):
    """returns name, file_name of parent folder"""
    lft, rgt = frappe.db.get_value("File", folder, ["lft", "rgt"])
    # in a nested-set tree the ancestors are exactly the nodes whose
    # (lft, rgt) interval strictly contains this one; order root -> leaf
    return frappe.db.sql("""select name, file_name from tabFile
        where lft < %s and rgt > %s order by lft asc""", (lft, rgt), as_dict=1)
@frappe.whitelist()
def create_new_folder(file_name, folder):
    """ create new folder under current parent folder """
    # local renamed from `file`, which shadowed the builtin
    new_folder = frappe.new_doc("File")
    new_folder.file_name = file_name
    new_folder.is_folder = 1
    new_folder.folder = folder
    new_folder.insert()
@frappe.whitelist()
def move_file(file_list, new_parent, old_parent):
    """Reparent every file in ``file_list`` under ``new_parent``."""
    if isinstance(file_list, basestring):
        file_list = json.loads(file_list)

    for entry in file_list:
        setup_folder_path(entry.get("name"), new_parent)

    # recalculate sizes
    for parent in (old_parent, new_parent):
        frappe.get_doc("File", parent).save()
def setup_folder_path(filename, new_parent):
    """Move ``filename`` under ``new_parent``; rename folders to their new path."""
    # local renamed from `file`, which shadowed the builtin
    doc = frappe.get_doc("File", filename)
    doc.folder = new_parent
    doc.save()
    if doc.is_folder:
        # folder names encode their path, so a move forces a rename
        frappe.rename_doc("File", doc.name, doc.get_name_based_on_parent_folder(), ignore_permissions=True)
def get_extension(filename, extn, content):
    """Best-effort extension: consult mimetypes first, else sniff image headers."""
    guessed = None
    if extn:
        guessed = mimetypes.guess_type(filename + "." + extn)[0]
    non_image = guessed is not None and not guessed.startswith("image/")
    # parentheses make the original `A or (B and C)` grouping explicit
    if guessed is None or (non_image and content):
        # detect file extension by reading image header properties
        extn = imghdr.what(filename + "." + (extn or ""), h=content)
    return extn
def get_local_image(file_url):
    """Open a public site file as (PIL image, filename-without-extn, extn)."""
    file_path = frappe.get_site_path("public", file_url.lstrip("/"))
    try:
        image = Image.open(file_path)
    except IOError:
        frappe.msgprint("Unable to read file format for {0}".format(file_url))
        raise

    content = None
    if "." in file_url:
        filename, extn = file_url.rsplit(".", 1)
    else:
        # no extn
        with open(file_path, "r") as f:
            content = f.read()
        filename = file_url
        extn = None
    extn = get_extension(filename, extn, content)
    return image, filename, extn
def get_web_image(file_url):
    """Download an image and return (PIL image, '/files/'-prefixed name, extn).

    Prints a user-facing message and re-raises on HTTP errors.
    """
    # download
    file_url = frappe.utils.get_url(file_url)
    r = requests.get(file_url, stream=True)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # `except X, e` was Python-2-only syntax; `as` works on 2.6+ and 3.x
        if "404" in e.args[0]:
            frappe.msgprint(_("File '{0}' not found").format(file_url))
        else:
            frappe.msgprint("Unable to read file format for {0}".format(file_url))
        raise
    image = Image.open(StringIO.StringIO(r.content))

    try:
        filename, extn = file_url.rsplit("/", 1)[1].rsplit(".", 1)
    except ValueError:
        # the case when the file url doesn't have filename or extension
        # but is fetched due to a query string. example: https://encrypted-tbn3.gstatic.com/images?q=something
        filename = get_random_filename()
        extn = None

    extn = get_extension(filename, extn, r.content)
    filename = "/files/" + strip(urllib.unquote(filename))

    return image, filename, extn
def check_file_permission(file_url):
    """Allow access to a private file if the user may read the File document
    or the document it is attached to; otherwise raise PermissionError."""
    # loop variable renamed from `file`, which shadowed the builtin
    for private_file in frappe.get_all("File",
            filters={"file_url": file_url, "is_private": 1},
            fields=["name", "attached_to_doctype", "attached_to_name"]):
        if (frappe.has_permission("File", ptype="read", doc=private_file.name)
            or frappe.has_permission(private_file.attached_to_doctype,
                ptype="read", doc=private_file.attached_to_name)):
            return True

    raise frappe.PermissionError
|
{
"content_hash": "77c237a72f474069f7c1f1444e1488e5",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 145,
"avg_line_length": 29.626436781609197,
"alnum_prop": 0.6919495635305528,
"repo_name": "mbauskar/helpdesk-frappe",
"id": "72bea79686f22e605903898cf59ac8c64a2672aa",
"size": "10411",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "frappe/core/doctype/file/file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "280577"
},
{
"name": "HTML",
"bytes": "1330224"
},
{
"name": "JavaScript",
"bytes": "1095115"
},
{
"name": "Python",
"bytes": "1241484"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import socket
import codecs
import threading
from os import path
from six.moves import queue
from six.moves.urllib.request import build_opener, Request, HTTPRedirectHandler
from six.moves.urllib.parse import unquote, urlsplit, quote
from six.moves.urllib.error import HTTPError
from six.moves.html_parser import HTMLParser, HTMLParseError
from docutils import nodes
from sphinx.builders import Builder
from sphinx.util.console import purple, red, darkgreen, darkgray, \
darkred, turquoise
from sphinx.util.pycompat import TextIOWrapper
class RedirectHandler(HTTPRedirectHandler):
    """A RedirectHandler that records the redirect code we got."""

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        # Let the stock handler build the follow-up request first (it may
        # raise for non-redirectable codes), then stash the status on the
        # original request so callers can report *how* the URI redirected.
        follow_up = HTTPRedirectHandler.redirect_request(
            self, req, fp, code, msg, headers, newurl)
        req.redirect_code = code
        return follow_up
# create an opener that will simulate a browser user-agent
# (presumably because some servers reject urllib's default User-agent)
opener = build_opener(RedirectHandler)
opener.addheaders = [('User-agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:25.0) '
                      'Gecko/20100101 Firefox/25.0')]
class HeadRequest(Request):
    """Subclass of urllib2.Request that sends a HEAD request."""

    def get_method(self):
        # urllib chooses the HTTP verb via get_method(); forcing HEAD lets
        # us probe a URL without downloading its body
        return 'HEAD'
class AnchorCheckParser(HTMLParser):
    """Specialized HTML parser that looks for a specific anchor."""

    def __init__(self, search_anchor):
        HTMLParser.__init__(self)
        self.search_anchor = search_anchor
        self.found = False

    def handle_starttag(self, tag, attrs):
        # an anchor may be declared via id="..." or name="..."; flag
        # success as soon as either attribute matches the target
        if any(key in ('id', 'name') and value == self.search_anchor
               for key, value in attrs):
            self.found = True
def check_anchor(f, hash):
    """Reads HTML data from a filelike object 'f' searching for anchor 'hash'.
    Returns True if anchor was found, False otherwise.
    """
    parser = AnchorCheckParser(hash)
    try:
        # Stream the document in 8K chunks and stop as soon as the anchor
        # turns up, in hopes of not downloading the whole thing.
        while True:
            block = f.read(8192)
            if not block or parser.found:
                break
            parser.feed(block)
        parser.close()
    except HTMLParseError:
        # HTMLParser is usually pretty good with sloppy HTML, but it tends to
        # choke on EOF. But we're done then anyway.
        pass
    return parser.found
class CheckExternalLinksBuilder(Builder):
    """
    Checks for broken external links.
    """
    name = 'linkcheck'

    def init(self):
        # compiled ignore patterns from conf.py's linkcheck_ignore
        self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
        # caches shared by all worker threads (URI -> result)
        self.good = set()
        self.broken = {}
        self.redirected = {}
        # set a timeout for non-responding servers
        socket.setdefaulttimeout(5.0)
        # create output file
        open(path.join(self.outdir, 'output.txt'), 'w').close()
        # create queues and worker threads
        self.wqueue = queue.Queue()
        self.rqueue = queue.Queue()
        self.workers = []
        for i in range(self.app.config.linkcheck_workers):
            thread = threading.Thread(target=self.check_thread)
            thread.setDaemon(True)
            thread.start()
            self.workers.append(thread)

    def check_thread(self):
        """Worker loop: pull URIs off wqueue, check them, push to rqueue."""
        kwargs = {}
        if self.app.config.linkcheck_timeout:
            kwargs['timeout'] = self.app.config.linkcheck_timeout

        def check():
            # closure over `uri` (rebound by the while-loop below);
            # returns a (status, info, redirect-code) tuple
            # check for various conditions without bothering the network
            if len(uri) == 0 or uri[0] == '#' or \
               uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:':
                return 'unchecked', '', 0
            elif not (uri[0:5] == 'http:' or uri[0:6] == 'https:'):
                return 'local', '', 0
            elif uri in self.good:
                return 'working', 'old', 0
            elif uri in self.broken:
                return 'broken', self.broken[uri], 0
            elif uri in self.redirected:
                return 'redirected', self.redirected[uri][0], self.redirected[uri][1]
            for rex in self.to_ignore:
                if rex.match(uri):
                    return 'ignored', '', 0

            # split off anchor
            if '#' in uri:
                req_url, hash = uri.split('#', 1)
            else:
                req_url = uri
                hash = None

            # handle non-ASCII URIs
            try:
                req_url.encode('ascii')
            except UnicodeError:
                # re-encode per RFC 3987: IDNA netloc, %-quoted path/query
                split = urlsplit(req_url)
                req_url = (split[0].encode() + '://' +  # scheme
                           split[1].encode('idna') +  # netloc
                           quote(split[2].encode('utf-8')))  # path
                if split[3]:  # query
                    req_url += '?' + quote(split[3].encode('utf-8'))
                # go back to Unicode strings which is required by Python 3
                # (but now all parts are pure ascii)
                req_url = req_url.decode('ascii')

            # need to actually check the URI
            try:
                if hash and self.app.config.linkcheck_anchors:
                    # Read the whole document and see if #hash exists
                    req = Request(req_url)
                    f = opener.open(req, **kwargs)
                    encoding = 'utf-8'
                    if hasattr(f.headers, 'get_content_charset'):
                        encoding = f.headers.get_content_charset() or encoding
                    found = check_anchor(TextIOWrapper(f, encoding), unquote(hash))
                    f.close()
                    if not found:
                        raise Exception("Anchor '%s' not found" % hash)
                else:
                    try:
                        # try a HEAD request, which should be easier on
                        # the server and the network
                        req = HeadRequest(req_url)
                        f = opener.open(req, **kwargs)
                        f.close()
                    except HTTPError as err:
                        if err.code != 405:
                            raise
                        # retry with GET if that fails, some servers
                        # don't like HEAD requests and reply with 405
                        req = Request(req_url)
                        f = opener.open(req, **kwargs)
                        f.close()
            except HTTPError as err:
                if err.code == 401:
                    # We'll take "Unauthorized" as working.
                    self.good.add(uri)
                    return 'working', ' - unauthorized', 0
                else:
                    self.broken[uri] = str(err)
                    return 'broken', str(err), 0
            except Exception as err:
                self.broken[uri] = str(err)
                return 'broken', str(err), 0
            if f.url.rstrip('/') == req_url.rstrip('/'):
                self.good.add(uri)
                return 'working', '', 0
            else:
                new_url = f.url
                if hash:
                    new_url += '#' + hash
                # RedirectHandler stored the code on the request object
                code = getattr(req, 'redirect_code', 0)
                self.redirected[uri] = (new_url, code)
                return 'redirected', new_url, code

        while True:
            uri, docname, lineno = self.wqueue.get()
            if uri is None:
                # sentinel from finish(): shut this worker down
                break
            status, info, code = check()
            self.rqueue.put((uri, docname, lineno, status, info, code))

    def process_result(self, result):
        """Log one check result and record it in output.txt."""
        uri, docname, lineno, status, info, code = result
        if status == 'unchecked':
            return
        if status == 'working' and info == 'old':
            # cache hit; already reported the first time
            return
        if lineno:
            self.info('(line %4d) ' % lineno, nonl=1)
        if status == 'ignored':
            self.info(darkgray('-ignored- ') + uri)
        elif status == 'local':
            self.info(darkgray('-local-   ') + uri)
            self.write_entry('local', docname, lineno, uri)
        elif status == 'working':
            self.info(darkgreen('ok        ') + uri + info)
        elif status == 'broken':
            self.info(red('broken    ') + uri + red(' - ' + info))
            self.write_entry('broken', docname, lineno, uri + ': ' + info)
            if self.app.quiet:
                self.warn('broken link: %s' % uri,
                          '%s:%s' % (self.env.doc2path(docname), lineno))
        elif status == 'redirected':
            text, color = {
                301: ('permanently', darkred),
                302: ('with Found', purple),
                303: ('with See Other', purple),
                307: ('temporarily', turquoise),
                0: ('with unknown code', purple),
            }[code]
            self.write_entry('redirected ' + text, docname, lineno,
                             uri + ' to ' + info)
            self.info(color('redirect  ') + uri + color(' - ' + text + ' to ' + info))

    def get_target_uri(self, docname, typ=None):
        # this builder produces no documents, so there is nothing to link to
        return ''

    def get_outdated_docs(self):
        # always re-check every document
        return self.env.found_docs

    def prepare_writing(self, docnames):
        return

    def write_doc(self, docname, doctree):
        self.info()
        n = 0
        for node in doctree.traverse(nodes.reference):
            if 'refuri' not in node:
                continue
            uri = node['refuri']
            # walk up the tree until an ancestor carries a line number
            lineno = None
            while lineno is None:
                node = node.parent
                if node is None:
                    break
                lineno = node.line
            self.wqueue.put((uri, docname, lineno), False)
            n += 1
        # drain exactly as many results as we queued
        done = 0
        while done < n:
            self.process_result(self.rqueue.get())
            done += 1

        if self.broken:
            self.app.statuscode = 1

    def write_entry(self, what, docname, line, uri):
        # append one line per result to <outdir>/output.txt
        output = codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8')
        output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
                                           line, what, uri))
        output.close()

    def finish(self):
        # one sentinel per worker thread tells check_thread() to exit
        for worker in self.workers:
            self.wqueue.put((None, None, None), False)
|
{
"content_hash": "4488346f30d6a3b8272e93dcd3a0a46e",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 87,
"avg_line_length": 37.12237762237762,
"alnum_prop": 0.5157765847226147,
"repo_name": "ArcherSys/ArcherSys",
"id": "9f5c2131c23091092ec226a5e9fa8e5142eb6aed",
"size": "10641",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/site-packages/sphinx/builders/linkcheck.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''
Custom filters for use in openshift-ansible
'''
from ansible import errors
from operator import itemgetter
import pdb
import re
import json
class FilterModule(object):
    ''' Custom ansible filters '''

    @staticmethod
    def oo_pdb(arg):
        ''' This pops you into a pdb instance where arg is the data passed in
            from the filter.
            Ex: "{{ hostvars | oo_pdb }}"
        '''
        pdb.set_trace()
        return arg

    @staticmethod
    def get_attr(data, attribute=None):
        ''' This looks up dictionary attributes of the form a.b.c and returns
            the value.
            Ex: data = {'a': {'b': {'c': 5}}}
                attribute = "a.b.c"
                returns 5
        '''
        if not attribute:
            raise errors.AnsibleFilterError("|failed expects attribute to be set")

        ptr = data
        for attr in attribute.split('.'):
            ptr = ptr[attr]

        return ptr

    @staticmethod
    def oo_flatten(data):
        ''' This filter plugin will flatten a list of lists
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects to flatten a List")

        return [item for sublist in data for item in sublist]

    @staticmethod
    def oo_collect(data, attribute=None, filters=None):
        ''' This takes a list of dict and collects all attributes specified into a
            list. If filter is specified then we will include all items that
            match _ALL_ of filters.  If a dict entry is missing the key in a
            filter it will be excluded from the match.
            Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
                         {'a':2, 'z': 'z'},        # True, return
                         {'a':3, 'z': 'z'},        # True, return
                         {'a':4, 'z': 'b'},        # FAILED, obj['z'] != filters['z']
                       ]
                attribute = 'a'
                filters   = {'z': 'z'}
                returns [1, 2, 3]
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects to filter on a List")

        if not attribute:
            raise errors.AnsibleFilterError("|failed expects attribute to be set")

        if filters is not None:
            if not issubclass(type(filters), dict):
                # fixed misspelled error message ("fialed" -> "failed")
                raise errors.AnsibleFilterError("|failed expects filter to be a"
                                                " dict")
            retval = [FilterModule.get_attr(d, attribute) for d in data if (
                all([d.get(key, None) == filters[key] for key in filters]))]
        else:
            retval = [FilterModule.get_attr(d, attribute) for d in data]

        return retval

    @staticmethod
    def oo_select_keys(data, keys):
        ''' This returns a list, which contains the value portions for the keys
            Ex: data = { 'a':1, 'b':2, 'c':3 }
                keys = ['a', 'c']
                returns [1, 3]
        '''
        if not issubclass(type(data), dict):
            raise errors.AnsibleFilterError("|failed expects to filter on a dict")

        if not issubclass(type(keys), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")

        # Gather up the values for the list of keys passed in
        retval = [data[key] for key in keys]

        return retval

    @staticmethod
    def oo_prepend_strings_in_list(data, prepend):
        ''' This takes a list of strings and prepends a string to each item in the
            list
            Ex: data = ['cart', 'tree']
                prepend = 'apple-'
                returns ['apple-cart', 'apple-tree']
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")
        if not all(isinstance(x, basestring) for x in data):
            raise errors.AnsibleFilterError("|failed expects first param is a list"
                                            " of strings")
        retval = [prepend + s for s in data]
        return retval

    @staticmethod
    def oo_combine_key_value(data, joiner='='):
        '''Take a list of dict in the form of { 'key': 'value'} and
           arrange them as a list of strings ['key=value']
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")

        rval = []
        for item in data:
            rval.append("%s%s%s" % (item['key'], joiner, item['value']))

        return rval

    @staticmethod
    def oo_ami_selector(data, image_name):
        ''' This takes a list of amis and an image name and attempts to return
            the latest ami.
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")

        if not data:
            return None
        else:
            if image_name is None or not image_name.endswith('_*'):
                # exact name (or unset): newest by full name wins
                ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
                return ami['ami_id']
            else:
                # wildcard: sort by the suffix after the last underscore
                ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
                ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
                return ami['ami_id']

    @staticmethod
    def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
        ''' This takes a dictionary of volume definitions and returns a valid ec2
            volume definition based on the host_type and the values in the
            dictionary.
            The dictionary should look similar to this:
                { 'master':
                    { 'root':
                        { 'volume_size': 10, 'device_type': 'gp2',
                          'iops': 500
                        }
                    },
                  'node':
                    { 'root':
                        { 'volume_size': 10, 'device_type': 'io1',
                          'iops': 1000
                        },
                      'docker':
                        { 'volume_size': 40, 'device_type': 'gp2',
                          'iops': 500, 'ephemeral': 'true'
                        }
                    }
                }
        '''
        if not issubclass(type(data), dict):
            raise errors.AnsibleFilterError("|failed expects first param is a dict")
        if host_type not in ['master', 'node', 'etcd']:
            raise errors.AnsibleFilterError("|failed expects etcd, master or node"
                                            " as the host type")

        root_vol = data[host_type]['root']
        root_vol['device_name'] = '/dev/sda1'
        root_vol['delete_on_termination'] = True
        # iops is only meaningful for provisioned-iops (io1) volumes
        if root_vol['device_type'] != 'io1':
            root_vol.pop('iops', None)
        if host_type == 'node':
            docker_vol = data[host_type]['docker']
            docker_vol['device_name'] = '/dev/xvdb'
            docker_vol['delete_on_termination'] = True
            if docker_vol['device_type'] != 'io1':
                docker_vol.pop('iops', None)
            if docker_ephemeral:
                # ephemeral (instance-store) volumes take no EBS settings
                docker_vol.pop('device_type', None)
                docker_vol.pop('delete_on_termination', None)
                docker_vol['ephemeral'] = 'ephemeral0'
            return [root_vol, docker_vol]
        elif host_type == 'etcd':
            etcd_vol = data[host_type]['etcd']
            etcd_vol['device_name'] = '/dev/xvdb'
            etcd_vol['delete_on_termination'] = True
            if etcd_vol['device_type'] != 'io1':
                etcd_vol.pop('iops', None)
            return [root_vol, etcd_vol]
        return [root_vol]

    @staticmethod
    def oo_split(string, separator=','):
        ''' This splits the input string into a list
        '''
        return string.split(separator)

    @staticmethod
    def oo_filter_list(data, filter_attr=None):
        ''' This returns a list, which contains all items where filter_attr
            evaluates to true
            Ex: data = [ { a: 1, b: True },
                         { a: 3, b: False },
                         { a: 5, b: True } ]
                filter_attr = 'b'
                returns [ { a: 1, b: True },
                          { a: 5, b: True } ]
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects to filter on a list")
        if not issubclass(type(filter_attr), str):
            raise errors.AnsibleFilterError("|failed expects filter_attr is a str")

        # Gather up the values for the list of keys passed in
        return [x for x in data if x[filter_attr]]

    @staticmethod
    def oo_parse_heat_stack_outputs(data):
        ''' Formats the HEAT stack output into a usable form
            (see the table example in the original docstring) turning the
            pretty-printed "outputs" JSON array into a {output_key:
            output_value} map, e.g.:
              {
                "Key_A": "value_A",
                "Key_B": ["value_B1", "value_B2"]
              }
        '''
        # Extract the "outputs" JSON snippet from the pretty-printed array
        in_outputs = False
        outputs = ''

        # table rows look like "| property | value |"
        line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
        for line in data['stdout_lines']:
            match = line_regex.match(line)
            if match:
                if match.group(1) == 'outputs':
                    in_outputs = True
                elif match.group(1) != '':
                    # a new property name ends the outputs section
                    in_outputs = False
                if in_outputs:
                    outputs += match.group(2)

        outputs = json.loads(outputs)

        # Revamp the "outputs" to put it in the form of a "Key: value" map
        revamped_outputs = {}
        for output in outputs:
            revamped_outputs[output['output_key']] = output['output_value']

        return revamped_outputs

    def filters(self):
        ''' returns a mapping of filters to methods '''
        return {
            "oo_select_keys": self.oo_select_keys,
            "oo_collect": self.oo_collect,
            "oo_flatten": self.oo_flatten,
            "oo_pdb": self.oo_pdb,
            "oo_prepend_strings_in_list": self.oo_prepend_strings_in_list,
            "oo_ami_selector": self.oo_ami_selector,
            "oo_ec2_volume_definition": self.oo_ec2_volume_definition,
            "oo_combine_key_value": self.oo_combine_key_value,
            "oo_split": self.oo_split,
            "oo_filter_list": self.oo_filter_list,
            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
        }
|
{
"content_hash": "75b266530951b0e349f5e5d962763400",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 84,
"avg_line_length": 40.65064102564103,
"alnum_prop": 0.4499724040053615,
"repo_name": "jdamick/openshift-ansible",
"id": "47033a88e1ce21613ecec1e5bf96c5791726bcf9",
"size": "12785",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "filter_plugins/oo_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "245312"
},
{
"name": "Ruby",
"bytes": "3270"
},
{
"name": "Shell",
"bytes": "10297"
},
{
"name": "VimL",
"bytes": "459"
}
],
"symlink_target": ""
}
|
import pytest
import requests
from pyffdl.sites.story import *
@pytest.mark.parametrize(
    "exception,url",
    [
        (requests.exceptions.MissingSchema, None),
        (requests.exceptions.MissingSchema, "net"),
        (requests.exceptions.InvalidURL, "https://"),
        (requests.exceptions.InvalidSchema, "hppt://httpbin.org"),
        (SystemExit, "http://httpbin.org/status/404"),
    ],
)
def test_check_setup(exception, url):
    """Story() must reject malformed or unreachable URLs."""
    with pytest.raises(exception):
        # the constructed object was never used; only the raised
        # exception matters (removed the unused `story` local)
        Story(url)
def test_check_empty_setup():
    """Story requires a url argument."""
    with pytest.raises(TypeError):
        # removed the unused `story` local; only the exception matters
        Story()
def test_prepare_style():
    # NOTE(review): prepare_style presumably expects a parsed style object
    # rather than a bare path string, hence the AttributeError — confirm
    with pytest.raises(AttributeError):
        prepare_style("style.css")
|
{
"content_hash": "c426a9e6a358d657920fe539f8de7847",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 66,
"avg_line_length": 24.137931034482758,
"alnum_prop": 0.6542857142857142,
"repo_name": "Birion/python-ffdl",
"id": "40d12634f4e5eb53821a657084ef5b6e7c571e26",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_story.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55544"
},
{
"name": "Sass",
"bytes": "963"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
import requests
import socket
from frappe.model.document import Document
from frappe import _
from six.moves.urllib.parse import urlparse
class SocialLoginKeys(Document):
    """Settings document; validates the configured Frappe Server URL."""

    def validate(self):
        self.validate_frappe_server_url()

    def validate_frappe_server_url(self):
        """Normalise the URL and verify it is parseable and reachable."""
        if self.frappe_server_url:
            # drop a single trailing slash so later path joins stay clean
            if self.frappe_server_url.endswith('/'):
                self.frappe_server_url = self.frappe_server_url[:-1]
            try:
                frappe_server_hostname = urlparse(self.frappe_server_url).netloc
            except Exception:
                # narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed
                frappe.throw(_("Check Frappe Server URL"))
            # NOTE(review): `or` binds looser than `and`, so this reads as
            # A or (B and C) — confirm that grouping is intended
            if socket.gethostname() != frappe_server_hostname or \
                (frappe.local.conf.domains is not None) and \
                (frappe_server_hostname not in frappe.local.conf.domains):
                try:
                    requests.get(self.frappe_server_url + "/api/method/frappe.handler.version", timeout=5)
                except Exception:
                    # narrowed from a bare `except:` (see above)
                    frappe.throw(_("Unable to make request to the Frappe Server URL"))
|
{
"content_hash": "30ca2bb377fb0d57833da09ae2ff1893",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 91,
"avg_line_length": 31.566666666666666,
"alnum_prop": 0.7275607180570222,
"repo_name": "paurosello/frappe",
"id": "33c8ab256000733bd6a5b8e495f1873704e8bd2e",
"size": "1099",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "frappe/integrations/doctype/social_login_keys/social_login_keys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "422931"
},
{
"name": "HTML",
"bytes": "202357"
},
{
"name": "JavaScript",
"bytes": "1858011"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "2042290"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
"""
Utility to manage multiple :program:`celeryd` instances.
"""
from __future__ import absolute_import
from celery.bin import celeryd_multi
from djcelery.management.base import CeleryCommand
class Command(CeleryCommand):
    """Run the celery daemon."""
    args = "[name1, [name2, [...]> [worker options]"
    help = "Manage multiple Celery worker nodes."
    requires_model_validation = True
    options = ()

    def run_from_argv(self, argv):
        """Hand the command line over to celeryd_multi's MultiTool."""
        argv = self.handle_default_options(argv)
        prog = argv[0]
        # MultiTool re-invokes this program for each node via --cmd
        argv.append("--cmd=%s celeryd_detach" % (prog, ))
        command_line = ["%s %s" % (prog, argv[1])] + argv[2:]
        celeryd_multi.MultiTool().execute_from_commandline(command_line)
|
{
"content_hash": "9b996ba16bc5c509f398a1acd731a09f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 28.458333333333332,
"alnum_prop": 0.6398243045387995,
"repo_name": "softak/webfaction_demo",
"id": "01ea4fbdb8e467b4e67ace6f4f0ca7e086221ae2",
"size": "683",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/djcelery/management/commands/celeryd_multi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
}
|
"""UnitDbl module."""
#===========================================================================
# Place all imports after here.
#
from __future__ import print_function
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class UnitDbl:
    """Class UnitDbl in development.

    A floating-point value tagged with physical units.  On construction
    the value is converted to a canonical unit (km, rad or sec); use
    convert() to read it back in any compatible unit.
    """
    # Unit conversion table.  Small subset of the full one but enough
    # to test the required functions.  First field is a scale factor to
    # convert the input units to the units of the second field.  Only
    # units in this table are allowed.
    allowed = {
        "m": (0.001, "km"),
        "km": (1, "km"),
        "mile": (1.609344, "km"),
        "rad": (1, "rad"),
        "deg": (1.745329251994330e-02, "rad"),
        "sec": (1, "sec"),
        "min": (60.0, "sec"),
        "hour": (3600, "sec"),
    }
    # Canonical unit -> kind of quantity it measures.
    _types = {
        "km": "distance",
        "rad": "angle",
        "sec": "time",
    }

    def __init__(self, value, units):
        """Create a new UnitDbl object.

        Units are internally converted to km, rad, and sec.  The only
        valid inputs for units are [ m, km, mile, rad, deg, sec, min, hour ].
        Use the convert() method to get a specific type of units back.

        = ERROR CONDITIONS
        - If the input units are not in the allowed list, an error is thrown.

        = INPUT VARIABLES
        - value    The numeric value of the UnitDbl.
        - units    The string name of the units the value is in.
        """
        self.checkUnits(units)
        data = self.allowed[units]
        self._value = float(value * data[0])
        self._units = data[1]

    def convert(self, units):
        """Convert the UnitDbl to a specific set of units.

        = ERROR CONDITIONS
        - If the input units are not in the allowed list, an error is thrown.

        = INPUT VARIABLES
        - units    The string name of the units to convert to.

        = RETURN VALUE
        - Returns the value of the UnitDbl in the requested units as a
          floating point number.
        """
        if self._units == units:
            return self._value
        self.checkUnits(units)
        data = self.allowed[units]
        if self._units != data[1]:
            msg = "Error trying to convert to different units.\n" \
                  "    Invalid conversion requested.\n" \
                  "    UnitDbl: %s\n" \
                  "    Units:   %s\n" % (str(self), units)
            raise ValueError(msg)
        return self._value / data[0]

    def __abs__(self):
        """Return the absolute value of this UnitDbl."""
        return UnitDbl(abs(self._value), self._units)

    def __neg__(self):
        """Return the negative value of this UnitDbl."""
        return UnitDbl(-self._value, self._units)

    def __nonzero__(self):
        """Test a UnitDbl for a non-zero value.

        = RETURN VALUE
        - Returns true if the value is non-zero.
        """
        # float.__nonzero__ does not exist on Python 3; compare directly.
        return self._value != 0.0

    # Python 3 name for the truth-value hook.
    __bool__ = __nonzero__

    def _compare(self, rhs):
        """Return -1/0/+1 ordering of self vs rhs.

        = ERROR CONDITIONS
        - If the rhs units are not the same as our units, an error is thrown.
        """
        self.checkSameUnits(rhs, "compare")
        # Avoid the Python-2-only builtin cmp() so this also runs on 3.
        return (self._value > rhs._value) - (self._value < rhs._value)

    def __cmp__(self, rhs):
        """Compare two UnitDbl's (Python 2 hook; rich comparisons below
        provide the same ordering on Python 3)."""
        return self._compare(rhs)

    # Rich comparison operators: required on Python 3, where __cmp__ is
    # ignored and cmp() no longer exists.  All raise ValueError when the
    # units differ, matching checkSameUnits().
    def __eq__(self, rhs):
        return self._compare(rhs) == 0

    def __ne__(self, rhs):
        return self._compare(rhs) != 0

    def __lt__(self, rhs):
        return self._compare(rhs) < 0

    def __le__(self, rhs):
        return self._compare(rhs) <= 0

    def __gt__(self, rhs):
        return self._compare(rhs) > 0

    def __ge__(self, rhs):
        return self._compare(rhs) >= 0

    def __hash__(self):
        # Defining __eq__ suppresses the default hash on Python 3;
        # restore hashability consistently with equality.
        return hash((self._value, self._units))

    def __add__(self, rhs):
        """Add two UnitDbl's.

        = ERROR CONDITIONS
        - If the input rhs units are not the same as our units,
          an error is thrown.

        = INPUT VARIABLES
        - rhs    The UnitDbl to add.

        = RETURN VALUE
        - Returns the sum of ourselves and the input UnitDbl.
        """
        self.checkSameUnits(rhs, "add")
        return UnitDbl(self._value + rhs._value, self._units)

    def __sub__(self, rhs):
        """Subtract two UnitDbl's.

        = ERROR CONDITIONS
        - If the input rhs units are not the same as our units,
          an error is thrown.

        = INPUT VARIABLES
        - rhs    The UnitDbl to subtract.

        = RETURN VALUE
        - Returns the difference of ourselves and the input UnitDbl.
        """
        self.checkSameUnits(rhs, "subtract")
        return UnitDbl(self._value - rhs._value, self._units)

    def __mul__(self, rhs):
        """Scale a UnitDbl by a value.

        = INPUT VARIABLES
        - rhs    The scalar to multiply by.

        = RETURN VALUE
        - Returns the scaled UnitDbl.
        """
        return UnitDbl(self._value * rhs, self._units)

    def __rmul__(self, lhs):
        """Scale a UnitDbl by a value.

        = INPUT VARIABLES
        - lhs    The scalar to multiply by.

        = RETURN VALUE
        - Returns the scaled UnitDbl.
        """
        return UnitDbl(self._value * lhs, self._units)

    def __div__(self, rhs):
        """Divide a UnitDbl by a value.

        = INPUT VARIABLES
        - rhs    The scalar to divide by.

        = RETURN VALUE
        - Returns the scaled UnitDbl.
        """
        return UnitDbl(self._value / rhs, self._units)

    # Python 3 division hook; Python 2's __div__ is ignored there.
    __truediv__ = __div__

    def __str__(self):
        """Print the UnitDbl."""
        return "%g *%s" % (self._value, self._units)

    def __repr__(self):
        """Print the UnitDbl."""
        return "UnitDbl( %g, '%s' )" % (self._value, self._units)

    def type(self):
        """Return the type of UnitDbl data."""
        return self._types[self._units]

    @staticmethod
    def range(start, stop, step=None):
        """Generate a range of UnitDbl objects.

        Similar to the Python range() method.  Returns the range
        [ start, stop ) at the requested step.  Each element will be a
        UnitDbl object.

        = INPUT VARIABLES
        - start    The starting value of the range.
        - stop     The stop value of the range.
        - step     Optional step to use.  If set to None, then a UnitDbl
                   of value 1 w/ the units of the start is used.

        = RETURN VALUE
        - Returns a list containing the requested UnitDbl values.
        """
        if step is None:
            step = UnitDbl(1, start._units)
        elems = []
        i = 0
        while True:
            d = start + i * step
            if d >= stop:
                break
            elems.append(d)
            i += 1
        return elems

    def checkUnits(self, units):
        """Check to see if some units are valid.

        = ERROR CONDITIONS
        - If the input units are not in the allowed list, an error is thrown.

        = INPUT VARIABLES
        - units    The string name of the units to check.
        """
        if units not in self.allowed.keys():
            msg = "Input units '%s' are not one of the supported types of %s" \
                  % (units, str(self.allowed.keys()))
            raise ValueError(msg)

    def checkSameUnits(self, rhs, func):
        """Check to see if units are the same.

        = ERROR CONDITIONS
        - If the units of the rhs UnitDbl are not the same as our units,
          an error is thrown.

        = INPUT VARIABLES
        - rhs    The UnitDbl to check for the same units
        - func   The name of the function doing the check.
        """
        if self._units != rhs._units:
            msg = "Cannot %s units of different types.\n" \
                  "LHS: %s\n" \
                  "RHS: %s" % (func, self._units, rhs._units)
            raise ValueError(msg)
#===========================================================================
|
{
"content_hash": "b5101c6355dd82f189d9858036904ee1",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 77,
"avg_line_length": 32.56583629893238,
"alnum_prop": 0.44344880340946347,
"repo_name": "RobertABT/heightmap",
"id": "d9451294666534b4c3784f8653f8239187eb4bd1",
"size": "9321",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "build/matplotlib/lib/matplotlib/testing/jpl_units/UnitDbl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25165856"
},
{
"name": "C++",
"bytes": "5251754"
},
{
"name": "CSS",
"bytes": "17123"
},
{
"name": "FORTRAN",
"bytes": "6353469"
},
{
"name": "JavaScript",
"bytes": "816504"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "284551"
},
{
"name": "Python",
"bytes": "13223936"
},
{
"name": "TeX",
"bytes": "37261"
}
],
"symlink_target": ""
}
|
import os
import unittest
import six
import struct
import random
import bmemcached
import uuid
from bmemcached.compat import long, unicode
if six.PY3:
from unittest import mock
else:
import mock
class MemcachedTests(unittest.TestCase):
    """Integration tests for bmemcached.Client.

    NOTE(review): assumes a memcached instance listening on the UNIX
    socket /tmp/memcached.sock and accepting SASL credentials
    user/password -- confirm the test environment provides it.
    """

    def setUp(self):
        # Connect to the local memcached over its UNIX socket.
        self.server = '/tmp/memcached.sock'
        self.client = bmemcached.Client(self.server, 'user', 'password')
        self.reset()

    def tearDown(self):
        # Leave no keys behind and close all sockets.
        self.reset()
        self.client.disconnect_all()

    def reset(self):
        # Delete the two fixed keys used throughout this suite so runs
        # are independent of each other.
        self.client.delete('test_key')
        self.client.delete('test_key2')

    def testSet(self):
        self.assertTrue(self.client.set('test_key', 'test'))

    def testSetMulti(self):
        # set_multi returns the list of keys that FAILED to store.
        six.assertCountEqual(self, self.client.set_multi({
            'test_key': 'value',
            'test_key2': 'value2'}), [])

    def testSetMultiBigData(self):
        # Stress: many keys in a single multi-set.
        self.client.set_multi(
            dict((unicode(k), b'value') for k in range(32767)))

    def testGetSimple(self):
        self.client.set('test_key', 'test')
        self.assertEqual('test', self.client.get('test_key'))

    def testGetDefault(self):
        # get() returns None for a missing key unless a default is given.
        self.assertEqual(None, self.client.get('test_key'))
        self.assertEqual('default_value', self.client.get('test_key', 'default_value'))

    def testGetBytes(self):
        # Ensure the code is 8-bit clean.
        value = b'\x01z\x7f\x00\x80\xfe\xff\x00'
        self.client.set('test_key', value)
        self.assertEqual(value, self.client.get('test_key'))

    def testGetDecodedText(self):
        # Unicode values survive the round trip unchanged.
        self.client.set('test_key', u'\u30b7')
        self.assertEqual(u'\u30b7', self.client.get('test_key'))

    def testCas(self):
        value, cas = self.client.gets('nonexistant')
        self.assertTrue(value is None)
        self.assertTrue(cas is None)
        # cas() with a cas value of None is equivalent to add.
        self.assertTrue(self.client.cas('test_key', 'test', cas))
        self.assertFalse(self.client.cas('test_key', 'testX', cas))
        # Load the CAS key.
        value, cas = self.client.gets('test_key')
        self.assertEqual('test', value)
        self.assertTrue(cas is not None)
        # Overwrite test_key only if it hasn't changed since we read it.
        self.assertTrue(self.client.cas('test_key', 'test2', cas))
        self.assertEqual(self.client.get('test_key'), 'test2')
        # This call won't overwrite the value, since the CAS key is out of date.
        self.assertFalse(self.client.cas('test_key', 'test3', cas))
        self.assertEqual(self.client.get('test_key'), 'test2')

    def testCasDelete(self):
        self.assertTrue(self.client.set('test_key', 'test'))
        value, cas = self.client.gets('test_key')
        # If a different CAS value is supplied, the key is not deleted.
        self.assertFalse(self.client.delete('test_key', cas=cas + 1))
        self.assertEqual('test', self.client.get('test_key'))
        # If the correct CAS value is supplied, the key is deleted.
        self.assertTrue(self.client.delete('test_key', cas=cas))
        self.assertEqual(None, self.client.get('test_key'))

    def testMultiCas(self):
        # Set multiple values, some using CAS and some not. True is returned, because
        # both values were stored.
        six.assertCountEqual(self, self.client.set_multi({
            ('test_key', 0): 'value1',
            'test_key2': 'value2',
        }), [])
        self.assertEqual(self.client.get('test_key'), 'value1')
        self.assertEqual(self.client.get('test_key2'), 'value2')
        # A CAS value of 0 means add. The value already exists, so this won't overwrite it.
        # ['test_key'] is returned, because test_key is not stored, but test_key2 is still stored.
        six.assertCountEqual(self, self.client.set_multi({
            ('test_key', 0): 'value3',
            'test_key2': 'value3',
        }), [('test_key', 0)])
        self.assertEqual(self.client.get('test_key'), 'value1')
        self.assertEqual(self.client.get('test_key2'), 'value3')
        # Update with the correct CAS value.
        value, cas = self.client.gets('test_key')
        six.assertCountEqual(self, self.client.set_multi({
            ('test_key', cas): 'value4',
        }), [])
        self.assertEqual(self.client.get('test_key'), 'value4')

    def testGetMultiCas(self):
        self.client.set('test_key', 'value1')
        self.client.set('test_key2', 'value2')
        value1, cas1 = self.client.gets('test_key')
        value2, cas2 = self.client.gets('test_key2')
        # Batch retrieve items and their CAS values, and verify that they match
        # the values we got by looking them up individually.
        values = self.client.get_multi(['test_key', 'test_key2'], get_cas=True)
        self.assertEqual(values.get('test_key')[0], 'value1')
        self.assertEqual(values.get('test_key2')[0], 'value2')

    def testGetEmptyString(self):
        # An empty string is a valid stored value, distinct from a miss.
        self.client.set('test_key', '')
        self.assertEqual('', self.client.get('test_key'))

    def testGetUnicodeString(self):
        self.client.set('test_key', u'\xac')
        self.assertEqual(u'\xac', self.client.get('test_key'))

    def testGetMulti(self):
        six.assertCountEqual(self, self.client.set_multi({
            'test_key': 'value',
            'test_key2': 'value2'
        }), [])
        self.assertEqual({'test_key': 'value', 'test_key2': 'value2'},
                         self.client.get_multi(['test_key', 'test_key2']))
        # Missing keys are simply absent from the result dict.
        self.assertEqual({'test_key': 'value', 'test_key2': 'value2'},
                         self.client.get_multi(['test_key', 'test_key2', 'nothere']))

    def testGetLong(self):
        # long (Py2) / int (Py3, via compat shim) round-trips with type intact.
        self.client.set('test_key', long(1))
        value = self.client.get('test_key')
        self.assertEqual(long(1), value)
        self.assertTrue(isinstance(value, long))

    def testGetInteger(self):
        self.client.set('test_key', 1)
        value = self.client.get('test_key')
        self.assertEqual(1, value)
        self.assertTrue(isinstance(value, int))

    def testGetBoolean(self):
        self.client.set('test_key', True)
        self.assertTrue(self.client.get('test_key') is True)

    def testGetObject(self):
        # Arbitrary serializable objects (here a dict) round-trip.
        self.client.set('test_key', {'a': 1})
        value = self.client.get('test_key')
        self.assertTrue(isinstance(value, dict))
        self.assertTrue('a' in value)
        self.assertEqual(1, value['a'])

    def testDelete(self):
        self.client.set('test_key', 'test')
        self.assertTrue(self.client.delete('test_key'))
        self.assertEqual(None, self.client.get('test_key'))

    def testDeleteMulti(self):
        self.client.set_multi({
            'test_key': 'value',
            'test_key2': 'value2'})
        self.assertTrue(self.client.delete_multi(['test_key', 'test_key2']))

    def testDeleteUnknownKey(self):
        # Deleting a missing key still reports success.
        self.assertTrue(self.client.delete('test_key'))

    def testAddPass(self):
        self.assertTrue(self.client.add('test_key', 'test'))

    def testAddFail(self):
        # add() fails when the key already exists.
        self.client.add('test_key', 'value')
        self.assertFalse(self.client.add('test_key', 'test'))

    def testReplacePass(self):
        self.client.add('test_key', 'value')
        self.assertTrue(self.client.replace('test_key', 'value2'))
        self.assertEqual('value2', self.client.get('test_key'))

    def testReplaceFail(self):
        # replace() fails when the key does not exist yet.
        self.assertFalse(self.client.replace('test_key', 'value'))

    def testIncrement(self):
        # First incr creates the counter at 0; later calls add the delta.
        self.assertEqual(0, self.client.incr('test_key', 1))
        self.assertEqual(1, self.client.incr('test_key', 1))

    def testIncrementInitialize(self):
        self.assertEqual(10, self.client.incr('test_key', 1, default=10))
        self.assertEqual(11, self.client.incr('test_key', 1, default=10))

    def testDecrement(self):
        # The counter is created at 0 and never goes below zero.
        self.assertEqual(0, self.client.decr('test_key', 1))
        self.assertEqual(0, self.client.decr('test_key', 1))

    def testDecrementInitialize(self):
        self.assertEqual(10, self.client.decr('test_key', 1, default=10))
        self.assertEqual(9, self.client.decr('test_key', 1, default=10))

    def testFlush(self):
        self.client.set('test_key', 'test')
        self.assertTrue(self.client.flush_all())
        self.assertEqual(None, self.client.get('test_key'))

    def testStats(self):
        stats = self.client.stats()[self.server]
        self.assertTrue('pid' in stats)
        stats = self.client.stats('settings')[self.server]
        self.assertTrue('verbosity' in stats)

    def testReconnect(self):
        # The client transparently reconnects after disconnect_all().
        self.client.set('test_key', 'test')
        self.client.disconnect_all()
        self.assertEqual('test', self.client.get('test_key'))
class TimeoutMemcachedTests(unittest.TestCase):
    """Behaviour of bmemcached.Client under socket timeouts."""

    def setUp(self):
        self.server = '{}:11211'.format(os.environ['MEMCACHED_HOST'])
        self.client = None

    def tearDown(self):
        if self.client:
            self.client.disconnect_all()
        # Clean up leftover keys with a fresh, non-timing-out client.
        cleanup = bmemcached.Client(self.server, 'user', 'password',
                                    socket_timeout=None)
        cleanup.delete('timeout_key')
        cleanup.delete('timeout_key_none')
        cleanup.disconnect_all()

    def testTimeout(self):
        self.client = bmemcached.Client(self.server, 'user', 'password',
                                        socket_timeout=0.00000000000001)
        # Set up a mock connection that gives the impression of
        # timing out in every recv() call.
        for server_proto in self.client._servers:
            server_proto.connection = mock.Mock()
            server_proto.connection.recv.return_value = b''
        self.client.set('timeout_key', 'test')
        self.assertEqual(self.client.get('timeout_key'), None)

    def testTimeoutNone(self):
        self.client = bmemcached.Client(self.server, 'user', 'password',
                                        socket_timeout=None)
        self.client.set('test_key_none', 'test')
        self.assertEqual(self.client.get('test_key_none'), 'test')
class BinaryMemcachedTests(unittest.TestCase):
    """Same coverage as MemcachedTests, but using randomly generated
    binary (struct-packed) and short string keys instead of fixed ones.

    NOTE(review): assumes a memcached instance on /tmp/memcached.sock
    accepting SASL user/password -- confirm the environment provides it.
    """

    def setUp(self):
        self.server = '/tmp/memcached.sock'
        self.client = bmemcached.Client(self.server, 'user', 'password')
        # Every key handed out by bkey()/skey() is recorded here so
        # reset() can delete them again.
        self._inserted_keys = list()
        self.reset()

    def tearDown(self):
        self.reset()
        self.client.disconnect_all()

    def bkey(self):
        # Random 8-byte binary key (little-endian packed integer),
        # tracked for cleanup.
        packed = struct.pack("<Q", int("%s%s%s%s" % (random.randint(1000, 9999),
                                                     random.randint(1000, 9999),
                                                     random.randint(1000, 9999),
                                                     random.randint(1000, 9999))))
        self._inserted_keys.append(packed)
        return packed

    def skey(self):
        # Random short string key (uuid prefix), tracked for cleanup.
        key = str(uuid.uuid4())[0:8]
        self._inserted_keys.append(key)
        return key

    def reset(self):
        # Delete every key any test in this instance created.
        for test_key in self._inserted_keys:
            self.client.delete(test_key)

    def testSet(self):
        self.assertTrue(self.client.set(self.bkey(), 'test'))
        self.assertTrue(self.client.set(self.skey(), 'test'))

    def testSetMulti(self):
        # set_multi returns the list of keys that FAILED to store.
        six.assertCountEqual(self, self.client.set_multi({
            self.bkey(): 'value',
            self.skey(): 'value2',
            self.bkey(): 'value3'}), [])

    def testSetMultiBigData(self):
        self.client.set_multi(
            dict((self.bkey(), b'value') for _ in range(32767)))
        self.client.set_multi(
            dict((self.skey(), b'value') for _ in range(32767)))

    def testGetSimple(self):
        key = self.bkey()
        self.client.set(key, 'test')
        self.assertEqual('test', self.client.get(key))
        key = self.skey()
        self.client.set(key, 'test')
        self.assertEqual('test', self.client.get(key))

    def testGetBytes(self):
        test_key = self.bkey()
        # Ensure the code is 8-bit clean.
        value = b'\x01z\x7f\x00\x80\xfe\xff\x00'
        self.client.set(test_key, value)
        self.assertEqual(value, self.client.get(test_key))

    def testGetDecodedText(self):
        test_key = self.bkey()
        self.client.set(test_key, u'\u30b7')
        self.assertEqual(u'\u30b7', self.client.get(test_key))

    def testCas(self):
        value, cas = self.client.gets('nonexistant')
        self.assertTrue(value is None)
        self.assertTrue(cas is None)
        # cas() with a cas value of None is equivalent to add.
        test_key = self.bkey()
        self.assertTrue(self.client.cas(test_key, 'test', cas))
        self.assertFalse(self.client.cas(test_key, 'testX', cas))
        # Load the CAS key.
        value, cas = self.client.gets(test_key)
        self.assertEqual('test', value)
        self.assertTrue(cas is not None)
        # Overwrite test_key only if it hasn't changed since we read it.
        self.assertTrue(self.client.cas(test_key, 'test2', cas))
        self.assertEqual(self.client.get(test_key), 'test2')
        # This call won't overwrite the value, since the CAS key is out of date.
        self.assertFalse(self.client.cas(test_key, 'test3', cas))
        self.assertEqual(self.client.get(test_key), 'test2')

    def testCasDelete(self):
        test_key = self.bkey()
        self.assertTrue(self.client.set(test_key, 'test'))
        value, cas = self.client.gets(test_key)
        # If a different CAS value is supplied, the key is not deleted.
        self.assertFalse(self.client.delete(test_key, cas=cas + 1))
        self.assertEqual('test', self.client.get(test_key))
        # If the correct CAS value is supplied, the key is deleted.
        self.assertTrue(self.client.delete(test_key, cas=cas))
        self.assertEqual(None, self.client.get(test_key))

    def testMultiCas(self):
        # Set multiple values, some using CAS and some not. True is returned, because
        # both values were stored.
        test_key1 = self.bkey()
        test_key2 = self.bkey()
        six.assertCountEqual(self, self.client.set_multi({
            (test_key1, 0): 'value1',
            test_key2: 'value2',
        }), [])
        self.assertEqual(self.client.get(test_key1), 'value1')
        self.assertEqual(self.client.get(test_key2), 'value2')
        # A CAS value of 0 means add. The value already exists, so this won't overwrite it.
        # [test_key1] is returned, because test_key1 is not stored, but test_key2 is still stored.
        six.assertCountEqual(self, self.client.set_multi({
            (test_key1, 0): 'value3',
            test_key2: 'value3',
        }), [(test_key1, 0)])
        self.assertEqual(self.client.get(test_key1), 'value1')
        self.assertEqual(self.client.get(test_key2), 'value3')
        # Update with the correct CAS value.
        # NOTE(review): gets() is called on a brand-new bkey() here, so
        # cas is None -- the sibling MemcachedTests.testMultiCas calls
        # gets() on the key being updated; looks like this should be
        # gets(test_key1). Confirm before changing.
        value, cas = self.client.gets(self.bkey())
        six.assertCountEqual(self, self.client.set_multi({
            (test_key1, cas): 'value4',
        }), [])
        self.assertEqual(self.client.get(test_key1), 'value4')

    def testGetMultiCas(self):
        for _ in range(0, 100):
            test_key1 = self.bkey()
            test_key2 = self.bkey()
            test_key3 = self.skey()
            self.client.set(test_key1, 'value1')
            self.client.set(test_key2, 'value2')
            value1, cas1 = self.client.gets(test_key1)
            value2, cas2 = self.client.gets(test_key2)
            # Batch retrieve items and their CAS values, and verify that they match
            # the values we got by looking them up individually.
            values = self.client.get_multi([test_key1, test_key2, test_key3], get_cas=True)
            self.assertEqual(values.get(test_key1)[0], 'value1')
            self.assertEqual(values.get(test_key2)[0], 'value2')

    def testGetEmptyString(self):
        test_key = self.bkey()
        self.client.set(test_key, '')
        self.assertEqual('', self.client.get(test_key))

    def testGetUnicodeString(self):
        test_key = self.bkey()
        self.client.set(test_key, u'\xac')
        self.assertEqual(u'\xac', self.client.get(test_key))

    def testGetMulti(self):
        test_key1 = self.bkey()
        test_key2 = self.bkey()
        test_key3 = self.skey()
        test_key4 = self.skey()
        six.assertCountEqual(self, self.client.set_multi({
            test_key1: 'value',
            test_key2: 'value2',
            test_key3: 'value3',
            test_key4: 'value4'
        }), [])
        self.assertEqual({test_key1: 'value', test_key2: 'value2', test_key3: 'value3'},
                         self.client.get_multi([test_key1, test_key2, test_key3]))
        self.assertEqual({test_key1: 'value', test_key2: 'value2', test_key3: 'value3', test_key4: 'value4'},
                         self.client.get_multi([test_key1, test_key2, test_key3, test_key4]))
        # Missing keys are simply absent from the result dict.
        self.assertEqual({test_key1: 'value', test_key2: 'value2'},
                         self.client.get_multi([test_key1, test_key2, 'nothere']))

    def testGetLong(self):
        test_key = self.bkey()
        self.client.set(test_key, long(1))
        value = self.client.get(test_key)
        self.assertEqual(long(1), value)
        self.assertTrue(isinstance(value, long))

    def testGetInteger(self):
        test_key = self.bkey()
        self.client.set(test_key, 1)
        value = self.client.get(test_key)
        self.assertEqual(1, value)
        self.assertTrue(isinstance(value, int))

    def testGetBoolean(self):
        test_key = self.bkey()
        self.client.set(test_key, True)
        self.assertTrue(self.client.get(test_key) is True)

    def testGetObject(self):
        # Arbitrary serializable objects (here a dict) round-trip.
        test_key = self.bkey()
        self.client.set(test_key, {'a': 1})
        value = self.client.get(test_key)
        self.assertTrue(isinstance(value, dict))
        self.assertTrue('a' in value)
        self.assertEqual(1, value['a'])

    def testDelete(self):
        test_key = self.bkey()
        self.client.set(test_key, 'test')
        self.assertTrue(self.client.delete(test_key))
        self.assertEqual(None, self.client.get(test_key))

    def testDeleteMulti(self):
        test_key1 = self.bkey()
        test_key2 = self.bkey()
        self.client.set_multi({
            test_key1: 'value',
            test_key2: 'value2'})
        self.assertTrue(self.client.delete_multi([test_key1, test_key2]))

    def testDeleteUnknownKey(self):
        # Deleting a missing key still reports success.
        test_key = self.bkey()
        self.assertTrue(self.client.delete(test_key))

    def testAddPass(self):
        test_key = self.bkey()
        self.assertTrue(self.client.add(test_key, 'test'))

    def testAddFail(self):
        # add() fails when the key already exists.
        test_key = self.bkey()
        self.client.add(test_key, 'value')
        self.assertFalse(self.client.add(test_key, 'test'))

    def testReplacePass(self):
        test_key = self.bkey()
        self.client.add(test_key, 'value')
        self.assertTrue(self.client.replace(test_key, 'value2'))
        self.assertEqual('value2', self.client.get(test_key))

    def testReplaceFail(self):
        # replace() fails when the key does not exist yet.
        test_key = self.bkey()
        self.assertFalse(self.client.replace(test_key, 'value'))

    def testIncrement(self):
        test_key = self.bkey()
        self.assertEqual(0, self.client.incr(test_key, 1))
        self.assertEqual(1, self.client.incr(test_key, 1))

    def testDecrement(self):
        test_key = self.bkey()
        self.assertEqual(0, self.client.decr(test_key, 1))
        self.assertEqual(0, self.client.decr(test_key, 1))

    def testFlush(self):
        test_key = self.bkey()
        self.client.set(test_key, 'test')
        self.assertTrue(self.client.flush_all())
        self.assertEqual(None, self.client.get(test_key))

    def testStats(self):
        stats = self.client.stats()[self.server]
        self.assertTrue('pid' in stats)
        stats = self.client.stats('settings')[self.server]
        self.assertTrue('verbosity' in stats)

    def testReconnect(self):
        # The client transparently reconnects after disconnect_all().
        test_key = self.bkey()
        self.client.set(test_key, 'test')
        self.client.disconnect_all()
        self.assertEqual('test', self.client.get(test_key))
class DistributedClient(MemcachedTests):
    """Re-runs the whole MemcachedTests suite against a
    bmemcached.DistributedClient over TCP instead of a UNIX socket."""

    def setUp(self):
        host = os.environ['MEMCACHED_HOST']
        self.server = '{}:11211'.format(host)
        self.client = bmemcached.DistributedClient([self.server], 'user', 'password')
        self.reset()
|
{
"content_hash": "3a8296e4df589289637646becdfe69a3",
"timestamp": "",
"source": "github",
"line_count": 550,
"max_line_length": 109,
"avg_line_length": 36.972727272727276,
"alnum_prop": 0.5990656503565281,
"repo_name": "jaysonsantos/python-binary-memcached",
"id": "f69ce26179320367cb8bbd781f983dcab372b39f",
"size": "20335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_simple_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108932"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import pytz
from unittest.mock import patch
from django.test import SimpleTestCase
import testil
from corehq.apps.users.models import WebUser, DomainMembership
from corehq.util.timezones.utils import get_timezone_for_user, parse_date
DOMAIN_TIMEZONE = pytz.timezone('Asia/Kolkata')
@patch('corehq.util.timezones.utils.get_timezone_for_domain', lambda x: DOMAIN_TIMEZONE)
@patch('corehq.apps.users.models._AuthorizableMixin.get_domain_membership')
class GetTimezoneForUserTest(SimpleTestCase):
    """get_timezone_for_user falls back to the domain timezone unless the
    user's domain membership explicitly overrides it."""

    def test_no_user(self, _):
        self.assertEqual(get_timezone_for_user(None, "test"), DOMAIN_TIMEZONE)

    def test_user_with_no_domain_membership(self, get_membership_mock):
        get_membership_mock.return_value = None
        user = WebUser()
        self.assertEqual(get_timezone_for_user(user, "test"), DOMAIN_TIMEZONE)

    def test_user_with_domain_membership(self, get_membership_mock):
        user = WebUser()
        membership = DomainMembership()
        membership.timezone = 'America/New_York'
        get_membership_mock.return_value = membership
        membership_tz = pytz.timezone('America/New_York')
        # Without override_global_tz, the domain timezone wins...
        self.assertEqual(get_timezone_for_user(user, "test"), DOMAIN_TIMEZONE)
        # ...with it, the membership timezone wins.
        membership.override_global_tz = True
        self.assertEqual(get_timezone_for_user(user, "test"), membership_tz)
def test_parse_date_iso_datetime():
    result = parse_date('2022-04-06T12:13:14Z')
    testil.eq(result, datetime(2022, 4, 6, 12, 13, 14))
    # the returned datetime is timezone naive
    testil.eq(result.tzinfo, None)
def test_parse_date_noniso_datetime():
    result = parse_date('Apr 06, 2022 12:13:14 UTC')
    testil.eq(result, datetime(2022, 4, 6, 12, 13, 14))
    # the returned datetime is timezone naive
    testil.eq(result.tzinfo, None)
def test_parse_date_date():
    # A bare date parses to midnight of that day.
    testil.eq(parse_date('2022-04-06'), datetime(2022, 4, 6, 0, 0, 0))
def test_parse_date_str():
    # Unparseable strings are passed through unchanged.
    testil.eq(parse_date('broken'), 'broken')
def test_parse_date_none():
    # None is passed through unchanged.
    testil.eq(parse_date(None), None)
def test_parse_date_int():
    # Non-string, non-None inputs are passed through unchanged.
    testil.eq(parse_date(4), 4)
|
{
"content_hash": "e1d48cf489b0bffdf751572db1d0659b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 95,
"avg_line_length": 32.44444444444444,
"alnum_prop": 0.6990582191780822,
"repo_name": "dimagi/commcare-hq",
"id": "a0dfc518fea8c567e944ad2f359f57fc280863cd",
"size": "2336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/util/timezones/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
import unittest
from pymap.mime import MessageContent
from pymap.mime.cte import MessageDecoder
_7bit_body = b"""Testing 7bit\n"""
_8bit_body = b"""Testing\x008bit\x00\n"""
_qp_body = b"""Testing=01Quoted=3DPrintable\n"""
_b64_body = b"""VGVzdGluZwEACkJhc2UgNjQgCg==\n"""
class TestMessageDecoder(unittest.TestCase):
    """MessageDecoder restores the original payload bytes for each
    supported Content-Transfer-Encoding."""

    def _decode(self, raw: bytes) -> bytes:
        # Parse the raw message and decode its body per its CTE header.
        content = MessageContent.parse(raw)
        decoder = MessageDecoder.of(content.header)
        return bytes(decoder.decode(content.body))

    def test_7bit_cte(self) -> None:
        # No CTE header at all: body passes through as 7bit.
        self.assertEqual(b'Testing 7bit\n', self._decode(b'\n' + _7bit_body))

    def test_8bit_cte(self) -> None:
        raw = b'Content-Transfer-Encoding: 8bit\n\n' + _8bit_body
        self.assertEqual(b'Testing\x008bit\x00\n', self._decode(raw))

    def test_quopri_cte(self) -> None:
        raw = b'Content-Transfer-Encoding: quoted-printable\n\n' + _qp_body
        self.assertEqual(b'Testing\x01Quoted=Printable\n', self._decode(raw))

    def test_base64_cte(self) -> None:
        raw = b'Content-Transfer-Encoding: base64\n\n' + _b64_body
        self.assertEqual(b'Testing\x01\x00\nBase 64 \n', self._decode(raw))
|
{
"content_hash": "ee0009b0ebac0ea282edde0383b9968c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 39.5,
"alnum_prop": 0.6708860759493671,
"repo_name": "icgood/pymap",
"id": "e1a0066eab91c7b9f077f5a7735cbf728d1176c5",
"size": "1423",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_mime_cte.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "390"
},
{
"name": "Lua",
"bytes": "15194"
},
{
"name": "Python",
"bytes": "857930"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
}
|
from six import iteritems
class ResourceBase(dict):
    """A node in a tree of dict-backed resources.

    Each node records its parent (``__parent__``) and the name it lives
    under in that parent (``__name__``), and lazily resolves the tree
    root on first access.
    """

    def __init__(self, parent, name=None):
        self.__parent__ = parent
        self.__name__ = name
        self.__root = None  # cache for the computed tree root

    @property
    def parent(self):
        """The node this resource hangs off, or None at the root."""
        return self.__parent__

    @property
    def name(self):
        """The key under which this resource lives in its parent."""
        return self.__name__

    @property
    def root(self):
        """The top of the tree; computed once, then cached."""
        if self.__root is None:
            self.__root = self.__get_root(self)
        return self.__root

    @classmethod
    def __get_root(cls, instance):
        # Walk parent links until reaching the node with no parent.
        node = instance
        while node.__parent__ is not None:
            node = node.__parent__
        return node
class Resource(ResourceBase):
    """Resource node whose children are built on demand from registered
    definitions (child name -> factory callable)."""

    @classmethod
    def get_child_definitions(cls):
        """Return the class's definition mapping, creating it on first use."""
        try:
            return cls.__child_definitions
        except AttributeError:
            cls.__child_definitions = {}
            return cls.__child_definitions

    @classmethod
    def register_child(cls, name, definition):
        """Register *definition* as the factory for children named *name*."""
        cls.get_child_definitions()[name] = definition

    @classmethod
    def get_child_definition(cls, name):
        """Look up the factory registered for *name* (KeyError if absent)."""
        return cls.get_child_definitions()[name]

    @staticmethod
    def children(definitions):
        """Class decorator that registers every (name, definition) pair."""
        def decorate(clazz):
            for child_name, child_def in iteritems(definitions):
                clazz.register_child(child_name, child_def)
            return clazz
        return decorate

    def __getitem__(self, key):
        # Build the child on first access; subsequent lookups return the
        # cached instance stored in the underlying dict.
        return self.setdefault(key, self.__make_child(key))

    def __make_child(self, key):
        factory = self.get_child_definition(key)
        return factory(self, key)
|
{
"content_hash": "26b44ff467a725950421ff5474531a3c",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 59,
"avg_line_length": 24.15625,
"alnum_prop": 0.5763260025873221,
"repo_name": "xica/hieratic",
"id": "109dada13730c5b323c117fb355ee1b12f6dc290",
"size": "1546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hieratic/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28115"
}
],
"symlink_target": ""
}
|
# Generated test-driver invocation: run the shared generic classification
# harness for an SVC (sigmoid kernel) model on the BinaryClass_10 dataset,
# targeting the "duckdb" SQL dialect.
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SVC_sigmoid" , "BinaryClass_10" , "duckdb")
|
{
"content_hash": "3843a004152637d52bafc76de4f738fb",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 35.25,
"alnum_prop": 0.7730496453900709,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "5f0aadb4ef760886cbf58d903f457f1b50be558b",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BinaryClass_10/ws_BinaryClass_10_SVC_sigmoid_duckdb_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
}
|
"""Field handler classes.
The field handlers are meant to parse information from or do some other generic
action for a specific field type for the build_api script.
"""
from __future__ import print_function
import contextlib
import functools
import os
import shutil
import sys
from google.protobuf import message as protobuf_message
from chromite.api.controller import controller_util
from chromite.api.gen.chromiumos import common_pb2
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class Error(Exception):
  """Base error class for all exceptions raised by this module."""
class InvalidResultPathError(Error):
  """A request's result path is invalid."""
class ChrootHandler(object):
  """Translate a Chroot message to chroot enter arguments and env."""

  def __init__(self, clear_field):
    self.clear_field = clear_field

  def handle(self, message):
    """Find and parse a Chroot field in |message|, or return None.

    The field is located by type rather than name so the message is not
    tied to any naming convention.
    """
    for descriptor in message.DESCRIPTOR.fields:
      value = getattr(message, descriptor.name)
      if not isinstance(value, common_pb2.Chroot):
        continue
      # Keep the reference before (optionally) clearing it off the message.
      if self.clear_field:
        message.ClearField(descriptor.name)
      return self.parse_chroot(value)
    return None

  def parse_chroot(self, chroot_message):
    """Parse a Chroot message instance."""
    return controller_util.ParseChroot(chroot_message)
def handle_chroot(message, clear_field=True):
  """Find and parse the chroot field, returning the Chroot instance.

  Args:
    message: A message that may carry a common_pb2.Chroot field.
    clear_field: Whether to clear the chroot field off |message| once parsed.

  Returns:
    chroot_lib.Chroot
  """
  handler = ChrootHandler(clear_field)
  parsed = handler.handle(message)
  if parsed:
    return parsed
  # Fall back to a default (empty) Chroot message.
  logging.warning('No chroot message found, falling back to defaults.')
  return handler.parse_chroot(common_pb2.Chroot())
def handle_goma(message, chroot_path):
  """Find and parse the GomaConfig field, returning the Goma instance.

  Args:
    message: A message that may carry a common_pb2.GomaConfig field.
    chroot_path: The chroot path, forwarded to the parser.

  Returns:
    The parsed Goma instance, or None when no GomaConfig field is present.
  """
  field_values = (
      getattr(message, descriptor.name)
      for descriptor in message.DESCRIPTOR.fields)
  for value in field_values:
    if isinstance(value, common_pb2.GomaConfig):
      return controller_util.ParseGomaConfig(value, chroot_path)
  return None
class PathHandler(object):
  """Handles copying a file or directory into or out of the chroot."""

  # Direction constants, mirroring common_pb2.Path locations.
  INSIDE = common_pb2.Path.INSIDE
  OUTSIDE = common_pb2.Path.OUTSIDE

  def __init__(self, field, destination, delete, prefix=None, reset=True):
    """Path handler initialization.

    Args:
      field (common_pb2.Path): The Path message.
      destination (str): The destination base path.
      delete (bool): Whether the copied file(s) should be deleted on cleanup.
      prefix (str|None): A path prefix to remove from the destination path
        when moving files inside the chroot, or to add to the source paths when
        moving files out of the chroot.
      reset (bool): Whether to reset the state on cleanup.
    """
    assert isinstance(field, common_pb2.Path)
    assert field.path
    assert field.location
    self.field = field
    self.destination = destination
    self.prefix = prefix or ''
    self.delete = delete
    self.tempdir = None
    self.reset = reset
    # For resetting the state: remember whether a transfer happened and keep
    # a pristine copy of the message so cleanup() can restore it.
    self._transferred = False
    self._original_message = common_pb2.Path()
    self._original_message.CopyFrom(self.field)

  def transfer(self, direction):
    """Copy the file or directory to its destination.

    On success the Path message itself is rewritten in place to point at the
    copied location; transfer() is a no-op when called a second time or when
    the file is already on the requested side.

    Args:
      direction (int): The direction files are being copied (into or out of
        the chroot). Specifying the direction allows avoiding performing
        unnecessary copies.
    """
    if self._transferred:
      return
    assert direction in [self.INSIDE, self.OUTSIDE]
    if self.field.location == direction:
      # Already in the correct location, nothing to do.
      return
    # Create a tempdir for the copied file if we're cleaning it up afterwords.
    if self.delete:
      self.tempdir = osutils.TempDir(base_dir=self.destination)
      destination = self.tempdir.tempdir
    else:
      destination = self.destination
    source = self.field.path
    if direction == self.OUTSIDE and self.prefix:
      # When we're extracting files, we need /tmp/result to be
      # /path/to/chroot/tmp/result.
      source = os.path.join(self.prefix, source.lstrip(os.sep))
    if os.path.isfile(source):
      # File - use the old file name, just copy it into the destination.
      dest_path = os.path.join(destination, os.path.basename(source))
      copy_fn = shutil.copy
    else:
      # Directory - just copy everything into the new location.
      dest_path = destination
      copy_fn = functools.partial(osutils.CopyDirContents, allow_nonempty=True)
    logging.debug('Copying %s to %s', source, dest_path)
    copy_fn(source, dest_path)
    # Clean up the destination path for returning, if applicable.
    return_path = dest_path
    if direction == self.INSIDE and return_path.startswith(self.prefix):
      return_path = return_path[len(self.prefix):]
    # Record the post-copy location on the message so the endpoint sees it.
    self.field.path = return_path
    self.field.location = direction
    self._transferred = True

  def cleanup(self):
    # Remove any tempdir transfer() created, and (when reset=True) restore
    # the Path message to its pre-transfer contents.
    if self.tempdir:
      self.tempdir.Cleanup()
      self.tempdir = None
    if self.reset:
      self.field.CopyFrom(self._original_message)
class SyncedDirHandler(object):
  """Handler for syncing directories across the chroot boundary."""

  def __init__(self, field, destination, prefix):
    self.field = field
    self.prefix = prefix
    # Normalize both directories to end with the separator so the copy
    # helpers treat them as directory contents.
    self.source = self._with_sep(self.field.dir)
    self.destination = self._with_sep(destination)
    # Snapshot the message so sync_out() can restore it untouched.
    self._original_message = common_pb2.SyncedDir()
    self._original_message.CopyFrom(self.field)

  @staticmethod
  def _with_sep(path):
    """Return |path|, guaranteed to end with the OS path separator."""
    return path if path.endswith(os.sep) else path + os.sep

  def _sync(self, src, dest):
    logging.info('Syncing %s to %s', src, dest)
    # TODO: This would probably be more efficient with rsync.
    osutils.EmptyDir(dest)
    osutils.CopyDirContents(src, dest)

  def sync_in(self):
    """Sync files from the source directory to the destination directory."""
    self._sync(self.source, self.destination)
    self.field.dir = '/%s' % os.path.relpath(self.destination, self.prefix)

  def sync_out(self):
    """Sync files from the destination directory to the source directory."""
    self._sync(self.destination, self.source)
    self.field.CopyFrom(self._original_message)
@contextlib.contextmanager
def copy_paths_in(message, destination, delete=True, prefix=None):
  """Context manager function to transfer and cleanup all Path messages.

  Args:
    message (Message): A message whose Path messages should be transferred.
    destination (str): The base destination path.
    delete (bool): Whether the file(s) should be deleted.
    prefix (str|None): A prefix path to remove from the final destination path
      in the Path message (i.e. remove the chroot path).

  Yields:
    list[PathHandler]: The path handlers.
  """
  assert destination
  path_handlers = _extract_handlers(
      message, destination, prefix, delete=delete, reset=True)
  # Copy everything in before the endpoint runs.
  for path_handler in path_handlers:
    path_handler.transfer(PathHandler.INSIDE)
  try:
    yield path_handlers
  finally:
    # Always clean up, even when the endpoint raised.
    for path_handler in path_handlers:
      path_handler.cleanup()
@contextlib.contextmanager
def sync_dirs(message, destination, prefix):
  """Context manager function to handle SyncedDir messages.

  The sync semantics are effectively:
    rsync -r --del source/ destination/
    * The endpoint runs. *
    rsync -r --del destination/ source/

  Args:
    message (Message): A message whose SyncedPath messages should be synced.
    destination (str): The destination path.
    prefix (str): A prefix path to remove from the final destination path
      in the Path message (i.e. remove the chroot path).

  Yields:
    list[SyncedDirHandler]: The handlers.
  """
  assert destination
  dir_handlers = _extract_handlers(
      message,
      destination,
      prefix=prefix,
      delete=False,
      reset=True,
      message_type=common_pb2.SyncedDir)
  for dir_handler in dir_handlers:
    dir_handler.sync_in()
  try:
    yield dir_handlers
  finally:
    # Sync results back out and restore the messages, even on error.
    for dir_handler in dir_handlers:
      dir_handler.sync_out()
def extract_results(request_message, response_message, chroot):
  """Transfer all response Path messages to the request's ResultPath.

  Args:
    request_message (Message): The request message containing a ResultPath
      message.
    response_message (Message): The response message whose Path message(s)
      are to be transferred.
    chroot (chroot_lib.Chroot): The chroot the files are being copied out of.
  """
  # Locate the request's ResultPath field by type.
  result_path_message = next(
      (getattr(request_message, descriptor.name)
       for descriptor in request_message.DESCRIPTOR.fields
       if isinstance(getattr(request_message, descriptor.name),
                     common_pb2.ResultPath)),
      None)
  if result_path_message is None:
    # No ResultPath to handle.
    return
  destination = result_path_message.path.path
  out_handlers = _extract_handlers(
      response_message, destination, chroot.path, delete=False, reset=False)
  for out_handler in out_handlers:
    out_handler.transfer(PathHandler.OUTSIDE)
    out_handler.cleanup()
def _extract_handlers(message, destination, prefix, delete=False, reset=False,
                      field_name=None, message_type=None):
  """Recursive helper for handle_paths to extract Path messages.

  Walks |message| (including nested and repeated fields) and builds a
  handler for every complete Path (or SyncedDir) message found.

  Args:
    message: The (sub)message or scalar field value being inspected.
    destination (str): Base destination path for any handlers created.
    prefix (str|None): Path prefix forwarded to the handlers.
    delete (bool): Whether created PathHandlers delete their copies on cleanup.
    reset (bool): Whether created handlers reset message state on cleanup.
    field_name (str|None): Dotted field path, used only in debug logs.
    message_type: The message class to extract; defaults to common_pb2.Path,
      pass common_pb2.SyncedDir to extract synced directories instead.

  Returns:
    list: PathHandler or SyncedDirHandler instances for every match.
  """
  message_type = message_type or common_pb2.Path
  is_path_target = message_type is common_pb2.Path
  is_synced_target = message_type is common_pb2.SyncedDir
  is_message = isinstance(message, protobuf_message.Message)
  is_result_path = isinstance(message, common_pb2.ResultPath)
  if not is_message or is_result_path:
    # Base case: Nothing to handle.
    # There's nothing we can do with scalar values.
    # Skip ResultPath instances to avoid unnecessary file copying.
    return []
  elif is_path_target and isinstance(message, common_pb2.Path):
    # Base case: Create handler for this message.
    if not message.path or not message.location:
      logging.debug('Skipping %s; incomplete.', field_name or 'message')
      return []
    handler = PathHandler(message, destination, delete=delete, prefix=prefix,
                          reset=reset)
    return [handler]
  elif is_synced_target and isinstance(message, common_pb2.SyncedDir):
    # Base case: Create a synced-dir handler for this message.
    if not message.dir:
      logging.debug('Skipping %s; no directory given.', field_name or 'message')
      return []
    handler = SyncedDirHandler(message, destination, prefix)
    return [handler]
  # Iterate through each field and recurse.
  handlers = []
  for descriptor in message.DESCRIPTOR.fields:
    field = getattr(message, descriptor.name)
    if field_name:
      new_field_name = '%s.%s' % (field_name, descriptor.name)
    else:
      new_field_name = descriptor.name
    if isinstance(field, protobuf_message.Message):
      # Recurse for nested Paths.
      handlers.extend(
          _extract_handlers(field, destination, prefix, delete, reset,
                            field_name=new_field_name,
                            message_type=message_type))
    else:
      # If it's iterable it may be a repeated field, try each element.
      try:
        iterator = iter(field)
      except TypeError:
        # Definitely not a repeated field, just move on.
        continue
      for element in iterator:
        handlers.extend(
            _extract_handlers(element, destination, prefix, delete, reset,
                              field_name=new_field_name,
                              message_type=message_type))
  return handlers
|
{
"content_hash": "a75018dabf2af47316fd70e9342d7d94",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 80,
"avg_line_length": 32.10354223433242,
"alnum_prop": 0.6857918859276863,
"repo_name": "endlessm/chromium-browser",
"id": "0eff8c832e9a3a0d1425e08d70504dbd16e6195d",
"size": "11972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/api/field_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class RadiusUser(AbstractUser):
    # No extra fields: identical to Django's AbstractUser apart from the
    # username length tweak applied immediately below.
    pass
# NOTE(review): mutating the field after class creation widens username to
# 255 characters in the model, but the database column only changes through
# a migration — confirm one was generated for this.
RadiusUser._meta.get_field('username').max_length = 255
|
{
"content_hash": "7770577062cf41b3fa54cff642adc5cd",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 25.285714285714285,
"alnum_prop": 0.8022598870056498,
"repo_name": "DanielGabris/radius_restserver",
"id": "5f21cdeb2159a5be7e29a4a0366a4aad8db86469",
"size": "177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/radauth/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1410"
},
{
"name": "HTML",
"bytes": "12060"
},
{
"name": "JavaScript",
"bytes": "44972"
},
{
"name": "Python",
"bytes": "48732"
},
{
"name": "Shell",
"bytes": "1181"
}
],
"symlink_target": ""
}
|
"""Libvirt volume driver for iSCSI"""
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
from oslo_log import log as logging
import nova.conf
from nova.i18n import _LW
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
    """Driver to attach Network volumes to libvirt."""

    def __init__(self, host):
        super(LibvirtISCSIVolumeDriver, self).__init__(host,
                                                       is_block_dev=True)
        # Call the factory here so we can support
        # more than x86 architectures.
        self.connector = connector.InitiatorConnector.factory(
            'ISCSI', utils.get_root_helper(),
            use_multipath=CONF.libvirt.volume_use_multipath,
            device_scan_attempts=CONF.libvirt.num_volume_scan_tries,
            transport=self._get_transport())

    def _get_transport(self):
        """Return the configured iSCSI transport iface, or 'default'."""
        return CONF.libvirt.iscsi_iface or 'default'

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtISCSIVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        conf.driver_io = "native"
        return conf

    def connect_volume(self, connection_info, disk_info, instance):
        """Attach the volume to instance_name."""
        LOG.debug("Calling os-brick to attach iSCSI Volume")
        attach_info = self.connector.connect_volume(connection_info['data'])
        LOG.debug("Attached iSCSI volume %s", attach_info)
        # Record the host device path for get_config().
        connection_info['data']['device_path'] = attach_info['path']

    def disconnect_volume(self, connection_info, disk_dev, instance):
        """Detach the volume from instance_name."""
        LOG.debug("calling os-brick to detach iSCSI Volume")
        try:
            self.connector.disconnect_volume(connection_info['data'], None)
        except os_brick_exception.VolumeDeviceNotFound as exc:
            # Best-effort: the device is already gone.
            LOG.warning(_LW('Ignoring VolumeDeviceNotFound: %s'), exc)
            return
        LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
        super(LibvirtISCSIVolumeDriver,
              self).disconnect_volume(connection_info, disk_dev, instance)
|
{
"content_hash": "7c89af30f627cde4c44a484d3b464291",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 76,
"avg_line_length": 36.68571428571428,
"alnum_prop": 0.6456386292834891,
"repo_name": "rajalokan/nova",
"id": "dfe72a2c3625363778f381d7bf865fd242b7d87e",
"size": "3140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/volume/iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
from player_methods import transparent_circle
from plugin import Plugin
import numpy as np
# TODO: Import pyglui
from pyglui import ui
from methods import denormalize
class Vis_Circle(Plugin):
    """Draw a translucent circle on the frame at each confident gaze point."""
    uniqueness = "not_unique"

    def __init__(self, g_pool,radius=20,color=(0.0,0.7,0.25,0.2),thickness=2,fill=True):
        super().__init__(g_pool)
        self.order = .9
        # Menu is created lazily in init_gui().
        self.menu = None
        # Color components are exposed individually so sliders can bind them.
        self.r, self.g, self.b, self.a = color
        self.radius = radius
        self.thickness = thickness
        self.fill = fill

    def update(self,frame,events):
        stroke = -1 if self.fill else self.thickness
        min_conf = self.g_pool.min_data_confidence
        for datum in events.get('gaze_positions',[]):
            if datum['confidence'] < min_conf:
                continue
            center = denormalize(datum['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True)
            # transparent_circle expects BGR(A) channel order.
            transparent_circle(frame.img, center, radius=self.radius,
                               color=(self.b, self.g, self.r, self.a),
                               thickness=stroke)

    def init_gui(self):
        # Build the plugin menu and attach it to the main GUI.
        self.menu = ui.Scrolling_Menu('Gaze Circle')
        self.g_pool.gui.append(self.menu)
        self.menu.append(ui.Button('Close',self.unset_alive))
        self.menu.append(ui.Slider('radius',self,min=1,step=1,max=100,label='Radius'))
        self.menu.append(ui.Slider('thickness',self,min=1,step=1,max=15,label='Stroke width'))
        self.menu.append(ui.Switch('fill',self,label='Fill'))
        color_menu = ui.Growing_Menu('Color')
        color_menu.collapsed = True
        color_menu.append(ui.Info_Text('Set RGB color components and alpha (opacity) values.'))
        for attr, label in (('r','Red'),('g','Green'),('b','Blue'),('a','Alpha')):
            color_menu.append(ui.Slider(attr,self,min=0.0,step=0.05,max=1.0,label=label))
        self.menu.append(color_menu)

    def deinit_gui(self):
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu = None

    def unset_alive(self):
        self.alive = False

    def gl_display(self):
        pass

    def get_init_dict(self):
        # Persisted so the plugin can be restored with the same settings.
        return {'radius':self.radius,'color':(self.r, self.g, self.b, self.a),'thickness':self.thickness,'fill':self.fill}

    def cleanup(self):
        """Called when the plugin gets terminated, voluntarily or forced.
        Destroy any GUI or glfw window here."""
        self.deinit_gui()
|
{
"content_hash": "a27ec868468686072f8cf793dd7c9a9e",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 177,
"avg_line_length": 34.55555555555556,
"alnum_prop": 0.5913183279742765,
"repo_name": "fsxfreak/esys-pbi",
"id": "7dcd3c6d89ea5339e6a2362c6db16002942174a2",
"size": "3110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pupil/pupil_src/player/vis_circle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "118"
},
{
"name": "C++",
"bytes": "286538"
},
{
"name": "Python",
"bytes": "1172023"
},
{
"name": "Shell",
"bytes": "409"
}
],
"symlink_target": ""
}
|
class COMMAND_FLAGS:
    """Bit flags controlling how a recipe command line is emitted."""
    IGNORED = 1   # prefix '-': make ignores the command's exit status
    SILENCED = 2  # prefix '@': make does not echo the command


class Command:
    """A single recipe command, serializable in GNU make syntax."""

    def __init__(self):
        self.command = "true"
        self.flags = 0

    def serialize(self):
        """Render the command as a tab-indented make recipe line.

        Embedded newlines become make-style ' \\' line continuations.
        When both flags are set, SILENCED wins (last check overwrites).
        """
        prefix = ""
        if self.flags & COMMAND_FLAGS.IGNORED:
            prefix = "-"
        if self.flags & COMMAND_FLAGS.SILENCED:
            prefix = "@"
        joined = " \\\n\t".join(self.command.split('\n'))
        return '\t' + prefix + joined.strip()
class Target:
    """A make target: name, dependency targets, and recipe commands."""

    def __init__(self, title = 'NOTHING'):
        self.target_name = title
        self.dependencies = []  # Target instances this target depends on
        self.commands = []      # Command instances forming the recipe
        self.source = None      # not used here; available to callers

    def serialize(self):
        """Render the rule: 'name: deps' header plus one line per command."""
        dep_names = " ".join(dep.target_name for dep in self.dependencies)
        header = (self.target_name + ": " + dep_names).strip()
        lines = [header] + [cmd.serialize() for cmd in self.commands]
        return "\n".join(lines) + "\n"
if __name__ == "__main__":
    # Smoke test: serialize a target with one dependency and one flagged,
    # multi-line command, and print the resulting make rule.
    cmd = Command()
    cmd.command = "echo abc \\\n eh"
    cmd.flags = COMMAND_FLAGS.IGNORED
    t2 = Target()
    t2.target_name = "abc_456"
    t = Target()
    t.target_name = "abc_123"
    t.commands += [cmd]
    t.dependencies += [t2]
    print(t.serialize())
|
{
"content_hash": "a1fb1d3460e624751d5b1b604e854369",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 62,
"avg_line_length": 22.03030303030303,
"alnum_prop": 0.5254470426409904,
"repo_name": "hbirchtree/coffeecutie-imgui",
"id": "99e1d04a6c8d6677fc72ca01c11a69d00e2af440",
"size": "1454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toolchain/python/make_components/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "37922"
},
{
"name": "CMake",
"bytes": "166718"
},
{
"name": "CSS",
"bytes": "2888"
},
{
"name": "Groovy",
"bytes": "6947"
},
{
"name": "HTML",
"bytes": "10316"
},
{
"name": "Java",
"bytes": "3508"
},
{
"name": "PowerShell",
"bytes": "6140"
},
{
"name": "Python",
"bytes": "108824"
},
{
"name": "Shell",
"bytes": "29964"
}
],
"symlink_target": ""
}
|
"""Various names for properties, transforms, etc."""
# All constants are for internal use only; no backwards-compatibility
# guarantees.
from __future__ import absolute_import
# TODO (altay): Move shared names to a common location.
# Standard file names used for staging files.
from builtins import object
PICKLED_MAIN_SESSION_FILE = 'pickled_main_session'
DATAFLOW_SDK_TARBALL_FILE = 'dataflow_python_sdk.tar'
# Staged portable pipeline proto and the metadata field recording where it
# was staged.
STAGED_PIPELINE_FILENAME = "pipeline.pb"
STAGED_PIPELINE_URL_METADATA_FIELD = "pipeline_url"
# String constants related to sources framework
SOURCE_FORMAT = 'custom_source'
SOURCE_TYPE = 'CustomSourcesType'
SERIALIZED_SOURCE_KEY = 'serialized_source'
# In a released SDK, container tags are selected based on the SDK version.
# Unreleased versions use container versions based on values of
# BEAM_CONTAINER_VERSION and BEAM_FNAPI_CONTAINER_VERSION (see below).
# Update this version to the next version whenever there is a change that will
# require changes to legacy Dataflow worker execution environment.
BEAM_CONTAINER_VERSION = 'beam-master-20181018'
# Update this version to the next version whenever there is a change that
# requires changes to SDK harness container or SDK harness launcher.
BEAM_FNAPI_CONTAINER_VERSION = 'beam-master-20181018'
# Package names for different distributions
# TODO(BEAM-5939): Deduplicate with apache_beam/runners/portability/stager.py
BEAM_PACKAGE_NAME = 'apache-beam'
# SDK identifiers for different distributions
BEAM_SDK_NAME = 'Apache Beam SDK for Python'
# Container image repository used for Dataflow worker images.
DATAFLOW_CONTAINER_IMAGE_REPOSITORY = 'dataflow.gcr.io/v1beta3'
class TransformNames(object):
  """For internal use only; no backwards-compatibility guarantees.

  Transform strings as they are expected in the CloudWorkflow protos.
  The literal values are part of the service contract, so they must not
  be changed even if the attribute names are refactored."""
  COLLECTION_TO_SINGLETON = 'CollectionToSingleton'
  COMBINE = 'CombineValues'
  CREATE_PCOLLECTION = 'CreateCollection'
  DO = 'ParallelDo'
  FLATTEN = 'Flatten'
  GROUP = 'GroupByKey'
  READ = 'ParallelRead'
  WRITE = 'ParallelWrite'
class PropertyNames(object):
  """For internal use only; no backwards-compatibility guarantees.

  Property strings as they are expected in the CloudWorkflow protos.
  The literal values are part of the service contract, so they must not
  be changed even if the attribute names are refactored."""
  # BigQuery source/sink properties.
  BIGQUERY_CREATE_DISPOSITION = 'create_disposition'
  BIGQUERY_DATASET = 'dataset'
  BIGQUERY_QUERY = 'bigquery_query'
  BIGQUERY_USE_LEGACY_SQL = 'bigquery_use_legacy_sql'
  BIGQUERY_FLATTEN_RESULTS = 'bigquery_flatten_results'
  BIGQUERY_EXPORT_FORMAT = 'bigquery_export_format'
  BIGQUERY_TABLE = 'table'
  BIGQUERY_PROJECT = 'project'
  BIGQUERY_SCHEMA = 'schema'
  BIGQUERY_WRITE_DISPOSITION = 'write_disposition'
  DISPLAY_DATA = 'display_data'
  ELEMENT = 'element'
  ELEMENTS = 'elements'
  ENCODING = 'encoding'
  # File source/sink properties.
  FILE_PATTERN = 'filepattern'
  FILE_NAME_PREFIX = 'filename_prefix'
  FILE_NAME_SUFFIX = 'filename_suffix'
  FORMAT = 'format'
  INPUTS = 'inputs'
  IMPULSE_ELEMENT = 'impulse_element'
  NON_PARALLEL_INPUTS = 'non_parallel_inputs'
  NUM_SHARDS = 'num_shards'
  OUT = 'out'
  OUTPUT = 'output'
  OUTPUT_INFO = 'output_info'
  OUTPUT_NAME = 'output_name'
  PARALLEL_INPUT = 'parallel_input'
  # Pub/Sub source/sink properties.
  PUBSUB_ID_LABEL = 'pubsub_id_label'
  PUBSUB_SERIALIZED_ATTRIBUTES_FN = 'pubsub_serialized_attributes_fn'
  PUBSUB_SUBSCRIPTION = 'pubsub_subscription'
  PUBSUB_TIMESTAMP_ATTRIBUTE = 'pubsub_timestamp_label'
  PUBSUB_TOPIC = 'pubsub_topic'
  SERIALIZED_FN = 'serialized_fn'
  SHARD_NAME_TEMPLATE = 'shard_template'
  SOURCE_STEP_INPUT = 'custom_source_step_input'
  STEP_NAME = 'step_name'
  USER_FN = 'user_fn'
  USER_NAME = 'user_name'
  VALIDATE_SINK = 'validate_sink'
  VALIDATE_SOURCE = 'validate_source'
  VALUE = 'value'
  WINDOWING_STRATEGY = 'windowing_strategy'
|
{
"content_hash": "edd5cba6ab06464303897e9224c074d5",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 35.80392156862745,
"alnum_prop": 0.7519167579408543,
"repo_name": "rangadi/incubator-beam",
"id": "4eabbbe8129a8a6de6a7d63422b23c16e60259d1",
"size": "4437",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/dataflow/internal/names.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "50057"
},
{
"name": "Java",
"bytes": "11779709"
},
{
"name": "Protocol Buffer",
"bytes": "55082"
},
{
"name": "Python",
"bytes": "2864316"
},
{
"name": "Shell",
"bytes": "44966"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import os
from typing import Iterable, Optional
from pants.backend.python.subsystems.debugpy import DebugPy
from pants.backend.python.target_types import (
ConsoleScript,
PexEntryPointField,
ResolvedPexEntryPoint,
ResolvePexEntryPointRequest,
)
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex import Pex, PexRequest
from pants.backend.python.util_rules.pex_environment import PexEnvironment
from pants.backend.python.util_rules.pex_from_targets import (
InterpreterConstraintsRequest,
PexFromTargetsRequest,
)
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.run import RunDebugAdapterRequest, RunRequest
from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
from pants.engine.addresses import Address
from pants.engine.fs import Digest, MergeDigests
from pants.engine.rules import Get, MultiGet, rule_helper
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
def _in_chroot(relpath: str) -> str:
return os.path.join("{chroot}", relpath)
@rule_helper
async def _create_python_source_run_request(
    address: Address,
    *,
    entry_point_field: PexEntryPointField,
    pex_env: PexEnvironment,
    run_in_sandbox: bool,
    console_script: Optional[ConsoleScript] = None,
    additional_pex_args: Iterable[str] = (),
) -> RunRequest:
    """Assemble the RunRequest for executing a Python source target.

    Builds the target's (sourceless) PEX, its first-party sources, and a
    local-dists PEX; merges their digests; and wires everything together
    through PEX_PATH / PEX_EXTRA_SYS_PATH.

    Args:
        address: The target to run.
        entry_point_field: Field used to resolve the PEX entry point
            (superseded by console_script when that is given).
        pex_env: Environment used to build the invocation argv and env.
        run_in_sandbox: When True, only the chrooted source copies go on
            PEX_EXTRA_SYS_PATH; otherwise in-repo sources come first.
        console_script: Optional console script to use as the PEX main.
        additional_pex_args: Extra arguments forwarded to the PEX build.
    """
    addresses = [address]
    # Resolve the entry point and transitive closure concurrently.
    entry_point, transitive_targets = await MultiGet(
        Get(
            ResolvedPexEntryPoint,
            ResolvePexEntryPointRequest(entry_point_field),
        ),
        Get(TransitiveTargets, TransitiveTargetsRequest(addresses)),
    )
    interpreter_constraints = await Get(
        InterpreterConstraints, InterpreterConstraintsRequest(addresses)
    )
    # Generated targets are named after generated_name, with '.' made
    # filename-safe.
    pex_filename = (
        address.generated_name.replace(".", "_") if address.generated_name else address.target_name
    )
    pex_get = Get(
        Pex,
        PexFromTargetsRequest(
            addresses,
            output_filename=f"{pex_filename}.pex",
            internal_only=True,
            include_source_files=False,
            # `PEX_EXTRA_SYS_PATH` should contain this entry_point's module.
            main=console_script or entry_point.val,
            additional_args=(
                *additional_pex_args,
                # N.B.: Since we cobble together the runtime environment via PEX_EXTRA_SYS_PATH
                # below, it's important for any app that re-executes itself that these environment
                # variables are not stripped.
                "--no-strip-pex-env",
            ),
        ),
    )
    sources_get = Get(
        PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)
    )
    pex, sources = await MultiGet(pex_get, sources_get)
    local_dists = await Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            addresses,
            internal_only=True,
            interpreter_constraints=interpreter_constraints,
            sources=sources,
        ),
    )
    input_digests = [
        pex.digest,
        local_dists.pex.digest,
        # Note regarding not-in-sandbox mode: You might think that the sources don't need to be copied
        # into the chroot when using inline sources. But they do, because some of them might be
        # codegenned, and those won't exist in the inline source tree. Rather than incurring the
        # complexity of figuring out here which sources were codegenned, we copy everything.
        # The inline source roots precede the chrooted ones in PEX_EXTRA_SYS_PATH, so the inline
        # sources will take precedence and their copies in the chroot will be ignored.
        local_dists.remaining_sources.source_files.snapshot.digest,
    ]
    merged_digest = await Get(Digest, MergeDigests(input_digests))
    complete_pex_env = pex_env.in_workspace()
    args = complete_pex_env.create_argv(_in_chroot(pex.name), python=pex.python)
    chrooted_source_roots = [_in_chroot(sr) for sr in sources.source_roots]
    # The order here is important: we want the in-repo sources to take precedence over their
    # copies in the sandbox (see above for why those copies exist even in non-sandboxed mode).
    source_roots = [
        *([] if run_in_sandbox else sources.source_roots),
        *chrooted_source_roots,
    ]
    extra_env = {
        **pex_env.in_workspace().environment_dict(python_configured=pex.python is not None),
        "PEX_PATH": _in_chroot(local_dists.pex.name),
        "PEX_EXTRA_SYS_PATH": os.pathsep.join(source_roots),
    }
    return RunRequest(
        digest=merged_digest,
        args=args,
        extra_env=extra_env,
    )
@rule_helper
async def _create_python_source_run_dap_request(
    regular_run_request: RunRequest,
    *,
    entry_point_field: PexEntryPointField,
    debugpy: DebugPy,
    debug_adapter: DebugAdapterSubsystem,
    console_script: Optional[ConsoleScript] = None,
) -> RunDebugAdapterRequest:
    """Wrap a regular RunRequest so the target runs under debugpy.

    Reuses the regular request's digest and env, adds the debugpy PEX, and
    rewrites the argv so debugpy launches the original PEX (reached through
    PEX_PATH).
    """
    entry_point, debugpy_pex = await MultiGet(
        Get(
            ResolvedPexEntryPoint,
            ResolvePexEntryPointRequest(entry_point_field),
        ),
        Get(Pex, PexRequest, debugpy.to_pex_request()),
    )
    merged_digest = await Get(
        Digest, MergeDigests([regular_run_request.digest, debugpy_pex.digest])
    )
    extra_env = dict(regular_run_request.extra_env)
    extra_env["PEX_PATH"] = os.pathsep.join(
        [
            extra_env["PEX_PATH"],
            # For debugpy to work properly, we need to have just one "environment" for our
            # command to run in. Therefore, we cobble one together by executing debugpy's PEX, and
            # shoehorning in the original PEX through PEX_PATH.
            _in_chroot(os.path.basename(regular_run_request.args[1])),
        ]
    )
    main = console_script or entry_point.val
    assert main is not None
    args = [
        regular_run_request.args[0],  # python executable
        _in_chroot(debugpy_pex.name),
        *debugpy.get_args(debug_adapter, main),
    ]
    return RunDebugAdapterRequest(digest=merged_digest, args=args, extra_env=extra_env)
|
{
"content_hash": "9b69b38823d6fc9c822dece3afde809b",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 102,
"avg_line_length": 37.476470588235294,
"alnum_prop": 0.6780725160885261,
"repo_name": "benjyw/pants",
"id": "4336aa1e9a8859bfeaa1f46cfb2f656d931af6fb",
"size": "6502",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/python/goals/run_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
}
|
"""Tests for the tensorflow_serving_client_workload script."""
import datetime
import sys
import unittest
import mock
import six
# These imports are mocked so that we don't need to add them to the
# test dependencies. The script under test for this test module is
# expected to execute only on a client VM which has built tensorflow
# from source.
# NOTE: every package on each dotted path must be stubbed individually,
# because "import a.b.c" requires a, a.b, and a.b.c to all be importable.
sys.modules['grpc'] = mock.Mock()
sys.modules['grpc.beta'] = mock.Mock()
sys.modules['grpc.framework'] = mock.Mock()
sys.modules['grpc.framework.interfaces'] = mock.Mock()
sys.modules['grpc.framework.interfaces.face'] = mock.Mock()
sys.modules['grpc.framework.interfaces.face.face'] = mock.Mock()
sys.modules['tensorflow'] = mock.Mock()
sys.modules['tensorflow_serving'] = mock.Mock()
sys.modules['tensorflow_serving.apis'] = mock.Mock()
from perfkitbenchmarker.scripts import tensorflow_serving_client_workload # pylint: disable=g-import-not-at-top,g-bad-import-order
class TestTensorflowServingClientWorkload(unittest.TestCase):
  """Unit tests for tensorflow_serving_client_workload.TfServingClientWorkload."""

  def setUp(self):
    mocked_flags = {
        'server': '123:456',
        'image_directory': '/fake',
        'num_threads': 16,
        'runtime': 20,
    }
    flags_patcher = mock.patch(
        tensorflow_serving_client_workload.__name__ + '.FLAGS')
    flags_patcher.start().configure_mock(**mocked_flags)
    self.addCleanup(flags_patcher.stop)

    # Patch the os module so constructing the workload never touches the
    # real filesystem.
    os_patcher = mock.patch(tensorflow_serving_client_workload.__name__ + '.os')
    os_patcher.start()
    self.addCleanup(os_patcher.stop)

    self.client_workload = (
        tensorflow_serving_client_workload.TfServingClientWorkload())

  def testPrintOutput(self):
    self.client_workload.num_completed_requests = 10
    self.client_workload.num_failed_requests = 2
    self.client_workload.latencies = [1.1, 2.2, 3.3]
    # Use an arbitrary start_time, with end_time exactly 20 seconds later so
    # the reported runtime (and hence throughput) is deterministic.
    self.client_workload.start_time = datetime.datetime(2000, 1, 1, 1, 1, 1, 1)
    self.client_workload.end_time = datetime.datetime(2000, 1, 1, 1, 1, 21, 1)
    expected_output = """
Completed requests: 10
Failed requests: 2
Runtime: 20.0
Number of threads: 16
Throughput: 0.5
Latency:
1.1
2.2
3.3""".strip()

    out = six.StringIO()
    self.client_workload.print_results(out=out)
    self.assertEqual(expected_output, out.getvalue().strip())
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "9a3473de2a4e3d52b0e50b394431b4d2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 131,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.6978114478114478,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "0cea03abd440a3aee1ea79e552534db13f988f35",
"size": "2986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scripts/tensorflow_serving_client_workload_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
}
|
"""Generated test for checking pynos based actions
"""
import xml.etree.ElementTree as ET
from st2tests.base import BaseActionTestCase
from snmp_add_snmp_host import snmp_add_snmp_host
# Public API of this generated test module: just the test case class.
__all__ = [
    'TestSnmpAddSnmpHost'
]
class MockCallback(object):  # pylint:disable=too-few-public-methods
    """Stand-in for the pynos callback; captures the XML it is invoked with.
    """

    # Serialized XML from the most recent callback invocation (None until
    # the callback has been called at least once).
    returned_data = None

    def callback(self, call, **kwargs):  # pylint:disable=unused-argument
        """Serialize ``call`` to XML and stash it for later inspection.
        """
        self.returned_data = ET.tostring(call)
class TestSnmpAddSnmpHost(BaseActionTestCase):
    """Generated test case for the snmp_add_snmp_host pynos action."""
    action_cls = snmp_add_snmp_host

    def test_action(self):
        """Run the action in test mode and verify the NETCONF XML it builds."""
        action = self.get_action_instance()
        mock_callback = MockCallback()
        kwargs = {
            'username': '',
            'host_info': ['10.0.2.1', '135'],
            'ip': '',
            'password': '',
            'port': '22',
            'community': 'test',
            'test': True,
            'callback': mock_callback.callback
        }
        action.run(**kwargs)
        expected_xml = (
            '<config><snmp-server xmlns="urn:brocade.com:mgmt:brocade-snmp"><h'
            'ost><ip>10.0.2.1</ip><community>test</community><udp-port>135</ud'
            'p-port></host></snmp-server></config>'
        )
        # BUG FIX: the generated test used assertTrue(expected_xml, ...), which
        # only checks that expected_xml is truthy (the second argument is just
        # the failure message), so the assertion could never fail. Compare the
        # captured XML for real; MockCallback stores ET.tostring() output,
        # which is bytes on Python 3, so decode before comparing.
        self.assertEqual(
            expected_xml, mock_callback.returned_data.decode('utf-8'))
|
{
"content_hash": "3d394cbe2b77cecc62b1c43fdffa04d3",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 28.07547169811321,
"alnum_prop": 0.5772849462365591,
"repo_name": "StackStorm/st2contrib",
"id": "ee374c6349e42f5368c3c96fa0b46f14bd119b8d",
"size": "1488",
"binary": false,
"copies": "3",
"ref": "refs/heads/st2contrib-deprecated-archive",
"path": "archive/packs/vdx/tests/test_action_snmp_add_snmp_host.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5581"
},
{
"name": "Python",
"bytes": "1362240"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7781"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import sys
from io import TextIOWrapper, TextIOBase
from splunklib.six import ensure_str
from .event import ET
try:
from splunklib.six.moves import cStringIO as StringIO
except ImportError:
from splunklib.six import StringIO
class EventWriter(object):
    """Writes events and error messages to Splunk from a modular input.

    The two important methods are ``write_event``, which takes an ``Event``
    object, and ``log``, which takes a severity and an error message.
    """

    # Severities that Splunk understands for log messages from modular inputs.
    # Do not change these
    DEBUG = "DEBUG"
    INFO = "INFO"
    WARN = "WARN"
    ERROR = "ERROR"
    FATAL = "FATAL"

    def __init__(self, output = sys.stdout, error = sys.stderr):
        """
        :param output: Where to write the output; defaults to sys.stdout.
        :param error: Where to write any errors; defaults to sys.stderr.
        """
        self._out = output
        self._err = error
        # Becomes True once the opening <stream> tag has been emitted.
        self.header_written = False

    def write_event(self, event):
        """Write a single ``Event`` to Splunk.

        Emits the opening ``<stream>`` tag first if this is the first event.

        :param event: An ``Event`` object.
        """
        if not self.header_written:
            self._out.write("<stream>")
            self.header_written = True
        event.write_to(self._out)

    def log(self, severity, message):
        """Log a message about the state of this modular input to Splunk.

        These messages show up in Splunk's internal logs.

        :param severity: ``string``, one of the severity class constants.
        :param message: ``string``, the message to log.
        """
        self._err.write("{0} {1}\n".format(severity, message))
        self._err.flush()

    def write_xml_document(self, document):
        """Serialize an ``ElementTree`` document to the output stream.

        :param document: An ``ElementTree`` object.
        """
        self._out.write(ensure_str(ET.tostring(document)))
        self._out.flush()

    def close(self):
        """Write the closing </stream> tag to make this XML well formed."""
        self._out.write("</stream>")
        self._out.flush()
|
{
"content_hash": "7c6905fd7189b498ad4b01941758574b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 100,
"avg_line_length": 31.54794520547945,
"alnum_prop": 0.6235345201910552,
"repo_name": "doksu/TA-centralops",
"id": "3e4321016ec868493b3cd9bdec1c4780c17cfa69",
"size": "2885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/splunklib/modularinput/event_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "404520"
}
],
"symlink_target": ""
}
|
"""Utilities for configuring platform specific installation."""
import os
import re
import shutil
from googlecloudsdk.core.credentials import gce as c_gce
from googlecloudsdk.core.util import console_io
from googlecloudsdk.core.util import platforms
# pylint:disable=superfluous-parens
# pylint:disable=unused-argument
def _UpdatePathForWindows(bin_path):
  """Update the Windows system path to include bin_path.

  Args:
    bin_path: str, The absolute path to the directory that will contain
        Cloud SDK binaries.
  """
  # pylint:disable=g-import-not-at-top, we want to only attempt these imports
  # on windows.
  try:
    import win32con
    import win32gui
    try:
      # Python 3
      import winreg
    except ImportError:
      # Python 2
      import _winreg as winreg
  except ImportError:
    # Without pywin32/winreg we cannot edit the registry; fall back to
    # telling the user how to update PATH by hand.
    print("""\
The installer is unable to automatically update your system PATH. Please add
  {path}
to your system PATH to enable easy use of the Cloud SDK Command Line Tools.
""".format(path=bin_path))
    return
  def GetEnv(name):
    # Read a per-user environment value from HKCU\Environment, returning ''
    # when the value does not exist.
    root = winreg.HKEY_CURRENT_USER
    subkey = 'Environment'
    key = winreg.OpenKey(root, subkey, 0, winreg.KEY_READ)
    try:
      value, _ = winreg.QueryValueEx(key, name)
    # pylint:disable=undefined-variable, This variable is defined in windows.
    except WindowsError:
      return ''
    return value
  def SetEnv(name, value):
    # Write the value back to HKCU\Environment as an expandable string and
    # broadcast WM_SETTINGCHANGE so running shells see the new environment.
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Environment', 0,
                         winreg.KEY_ALL_ACCESS)
    winreg.SetValueEx(key, name, 0, winreg.REG_EXPAND_SZ, value)
    winreg.CloseKey(key)
    win32gui.SendMessage(
        win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment')
    return value
  def Remove(paths, value):
    # Drop every occurrence of value from the list, in place.
    while value in paths:
      paths.remove(value)
  def PrependEnv(name, values):
    # Move (or insert) each value to the front of the ;-separated list held
    # in the environment variable `name`, de-duplicating existing entries.
    paths = GetEnv(name).split(';')
    for value in values:
      if value in paths:
        Remove(paths, value)
      paths.insert(0, value)
    SetEnv(name, ';'.join(paths))
  PrependEnv('Path', [bin_path])
  print("""\
The following directory has been added to your PATH.
  {bin_path}
Create a new command shell for the changes to take effect.
""".format(bin_path=bin_path))
def UpdateRC(bash_completion, path_update, rc_path, bin_path, sdk_root):
  """Update the system path to include bin_path.

  Args:
    bash_completion: bool, Whether or not to do bash completion. If None, ask.
    path_update: bool, Whether or not to update the system PATH. If None, ask.
    rc_path: str, The path to the rc file to update. If None, ask.
    bin_path: str, The absolute path to the directory that will contain
        Cloud SDK binaries.
    sdk_root: str, The path to the Cloud SDK root.
  """
  host_os = platforms.OperatingSystem.Current()
  # On Windows, PATH lives in the registry rather than an rc file, and bash
  # completion does not apply; handle it separately and return early.
  if host_os == platforms.OperatingSystem.WINDOWS:
    if path_update is None:
      path_update = console_io.PromptContinue(
          prompt_string='Update %PATH% to include Cloud SDK binaries?')
    if path_update:
      _UpdatePathForWindows(bin_path)
    return
  completion_rc_path = os.path.join(sdk_root, 'completion.bash.inc')
  path_rc_path = os.path.join(sdk_root, 'path.bash.inc')
  if bash_completion is None:
    if path_update is None:  # Ask only one question if both were not set.
      path_update = console_io.PromptContinue(
          prompt_string=('\nModify profile to update your $PATH '
                         'and enable bash completion?'))
      bash_completion = path_update
    else:
      bash_completion = console_io.PromptContinue(
          prompt_string=('\nModify profile to enable bash completion?'))
  elif path_update is None:
    path_update = console_io.PromptContinue(
        prompt_string=('\nModify profile to update your $PATH?'))
  # For every feature the user declined, print manual instructions instead.
  if not bash_completion:
    print("""\
Source [{completion_rc_path}]
in your profile to enable bash completion for gcloud.
""".format(completion_rc_path=completion_rc_path))
  if not path_update:
    print("""\
Source [{path_rc_path}]
in your profile to add the Google Cloud SDK command line tools to your $PATH.
""".format(path_rc_path=path_rc_path))
  if not bash_completion and not path_update:
    return
  if not rc_path:
    # figure out what file to edit
    if host_os == platforms.OperatingSystem.LINUX:
      # On GCE instances (.bashrc is not sourced for ssh logins there) use
      # .bash_profile; plain Linux uses .bashrc.
      if c_gce.Metadata().connected:
        file_name = '.bash_profile'
      else:
        file_name = '.bashrc'
    elif host_os == platforms.OperatingSystem.MACOSX:
      file_name = '.bash_profile'
    elif host_os == platforms.OperatingSystem.CYGWIN:
      file_name = '.bashrc'
    elif host_os == platforms.OperatingSystem.MSYS:
      file_name = '.profile'
    else:
      file_name = '.bashrc'
    rc_path = os.path.expanduser(os.path.join('~', file_name))
    rc_path_update = console_io.PromptResponse((
        'The Google Cloud SDK installer will now prompt you to update an rc '
        'file to bring the Google Cloud CLIs into your environment.\n\n'
        'Enter path to an rc file to update, or leave blank to use '
        '[{rc_path}]: ').format(rc_path=rc_path))
    if rc_path_update:
      rc_path = os.path.expanduser(rc_path_update)
  # cached_rc_data keeps the original contents so we can detect a no-op below.
  if os.path.exists(rc_path):
    with open(rc_path) as rc_file:
      rc_data = rc_file.read()
      cached_rc_data = rc_data
  else:
    rc_data = ''
    cached_rc_data = ''
  if path_update:
    # Strip any stanza written by a previous install before appending the
    # fresh one, so repeated installs do not accumulate duplicate lines.
    path_comment = r'# The next line updates PATH for the Google Cloud SDK.'
    path_subre = re.compile(r'\n*'+path_comment+r'\n.*$',
                            re.MULTILINE)
    path_line = "{comment}\nsource '{path_rc_path}'\n".format(
        comment=path_comment, path_rc_path=path_rc_path)
    filtered_data = path_subre.sub('', rc_data)
    rc_data = '{filtered_data}\n{path_line}'.format(
        filtered_data=filtered_data,
        path_line=path_line)
  if bash_completion:
    # Same replace-then-append dance for the completion stanza.
    complete_comment = r'# The next line enables bash completion for gcloud.'
    complete_subre = re.compile(r'\n*'+complete_comment+r'\n.*$',
                                re.MULTILINE)
    complete_line = "{comment}\nsource '{rc_path}'\n".format(
        comment=complete_comment, rc_path=completion_rc_path)
    filtered_data = complete_subre.sub('', rc_data)
    rc_data = '{filtered_data}\n{complete_line}'.format(
        filtered_data=filtered_data,
        complete_line=complete_line)
  if cached_rc_data == rc_data:
    print('No changes necessary for [{rc}].'.format(rc=rc_path))
    return
  # Back up the existing rc file before overwriting it.
  if os.path.exists(rc_path):
    rc_backup = rc_path+'.backup'
    print('Backing up [{rc}] to [{backup}].'.format(
        rc=rc_path, backup=rc_backup))
    shutil.copyfile(rc_path, rc_backup)
  with open(rc_path, 'w') as rc_file:
    rc_file.write(rc_data)
  print("""\
[{rc_path}] has been updated.
Start a new shell for the changes to take effect.
""".format(rc_path=rc_path))
|
{
"content_hash": "ae29c1bbac84d178061c32e21c7fa728",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 78,
"avg_line_length": 32.46190476190476,
"alnum_prop": 0.6590875751796978,
"repo_name": "ychen820/microblog",
"id": "47d3b58942117e8a5e91b34723583c5e1fc404a5",
"size": "6868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/lib/googlecloudsdk/core/util/platforms_install.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import os
import shutil
import subprocess
from pathlib import Path
from typing import Any, Dict, List, Union
from atomic_reactor.constants import (
IMAGE_TYPE_DOCKER_ARCHIVE,
PLUGIN_FETCH_SOURCES_KEY,
PLUGIN_SOURCE_CONTAINER_KEY,
)
from atomic_reactor.plugin import Plugin, PluginFailedException
from atomic_reactor.util import get_exported_image_metadata
from atomic_reactor.utils import retries
class SourceContainerPlugin(Plugin):
    """
    Build source container image using
    https://github.com/containers/BuildSourceImage
    """

    is_allowed_to_fail = False
    key = PLUGIN_SOURCE_CONTAINER_KEY

    def export_image(self, image_output_dir: Path) -> Dict[str, Union[str, int]]:
        """Convert the OCI image built by BSI into a docker-archive via skopeo.

        :param image_output_dir: directory holding the OCI-layout image.
        :return: metadata dict describing the exported docker archive.
        :raises subprocess.CalledProcessError: if skopeo fails after retries.
        """
        output_path = self.workflow.build_dir.any_platform.exported_squashed_image
        cmd = ['skopeo', 'copy']
        source_img = 'oci:{}'.format(image_output_dir)
        dest_img = 'docker-archive:{}'.format(output_path)
        cmd += [source_img, dest_img]
        # Remove any partially-written archive between retries.
        cleanup_cmd = ['rm', str(output_path)]
        try:
            retries.run_cmd(cmd, cleanup_cmd)
        except subprocess.CalledProcessError as e:
            self.log.error("failed to save docker-archive :\n%s", e.output)
            raise
        return get_exported_image_metadata(str(output_path), IMAGE_TYPE_DOCKER_ARCHIVE)

    def split_remote_sources_to_subdirs(self, remote_source_data_dir) -> List[str]:
        """Splits remote source archives to subdirs

        Each entry found directly in ``remote_source_data_dir`` is moved into
        its own ``remote_source_<N>`` subdirectory, so each archive can be
        passed to BSI as a separate ``-e`` argument.

        :param remote_source_data_dir: directory with downloaded remote sources.
        :return: list of the created subdirectory paths.
        """
        sources_subdirs = []
        # os.listdir snapshots the directory contents up front, so the
        # subdirectories created below are not themselves iterated over.
        for count, archive in enumerate(os.listdir(remote_source_data_dir)):
            subdir = os.path.join(remote_source_data_dir, f"remote_source_{count}")
            if not os.path.exists(subdir):
                os.makedirs(subdir)
            old_path = os.path.join(remote_source_data_dir, archive)
            new_path = os.path.join(subdir, archive)
            shutil.move(old_path, new_path)
            sources_subdirs.append(subdir)
        return sources_subdirs

    def run(self) -> Dict[str, Any]:
        """Build image inside current environment.

        :return: a mapping containing build results. If the build fails, key
          ``fail_reason`` must be included with a meaningful message.
        :rtype: Dict[str, Any]
        """
        # Locate whichever source directories the fetch_sources plugin produced.
        fetch_sources_result = self.workflow.data.plugins_results.get(PLUGIN_FETCH_SOURCES_KEY, {})
        source_data_dir = fetch_sources_result.get('image_sources_dir')
        remote_source_data_dir = fetch_sources_result.get('remote_sources_dir')
        maven_source_data_dir = fetch_sources_result.get('maven_sources_dir')
        source_exists = source_data_dir and os.path.isdir(source_data_dir)
        remote_source_exists = remote_source_data_dir and os.path.isdir(remote_source_data_dir)
        maven_source_exists = maven_source_data_dir and os.path.isdir(maven_source_data_dir)
        # At least one kind of source must be present, otherwise there is
        # nothing to build.
        if not any([source_exists, remote_source_exists, maven_source_exists]):
            err_msg = "No SRPMs directory '{}' available".format(source_data_dir)
            err_msg += "\nNo Remote source directory '{}' available".format(remote_source_data_dir)
            err_msg += "\nNo Maven source directory '{}' available".format(maven_source_data_dir)
            self.log.error(err_msg)
            raise PluginFailedException(err_msg)
        # Present-but-empty directories are tolerated, just flagged.
        if source_exists and not os.listdir(source_data_dir):
            self.log.warning("SRPMs directory '%s' is empty", source_data_dir)
        if remote_source_exists and not os.listdir(remote_source_data_dir):
            self.log.warning("Remote source directory '%s' is empty", remote_source_data_dir)
        if maven_source_exists and not os.listdir(maven_source_data_dir):
            self.log.warning("Maven source directory '%s' is empty", maven_source_data_dir)
        image_output_dir: Path = self.workflow.build_dir.source_container_output_dir
        image_output_dir.mkdir(exist_ok=True)
        # Assemble the BSI command line. '-d' takes a comma-separated list of
        # source drivers, which is inserted (cmd.insert below) once all input
        # kinds are known.
        cmd = ['bsi', '-d']
        drivers = set()
        if source_exists:
            drivers.add('sourcedriver_rpm_dir')
            cmd.append('-s')
            cmd.append('{}'.format(source_data_dir))
        if remote_source_exists:
            sources_subdirs = self.split_remote_sources_to_subdirs(remote_source_data_dir)
            drivers.add('sourcedriver_extra_src_dir')
            # One '-e' per remote-source subdirectory.
            for source_subdir in sources_subdirs:
                cmd.append('-e')
                cmd.append(source_subdir)
        if maven_source_exists:
            drivers.add('sourcedriver_extra_src_dir')
            for maven_source_subdir in os.listdir(maven_source_data_dir):
                cmd.append('-e')
                cmd.append('{}'.format(os.path.join(maven_source_data_dir, maven_source_subdir)))
        driver_str = ','.join(drivers)
        cmd.insert(2, driver_str)
        cmd.append('-o')
        cmd.append('{}'.format(image_output_dir))
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, text=True)
        except subprocess.CalledProcessError as e:
            self.log.error("BSI failed with output:\n%s", e.output)
            raise PluginFailedException('BSI utility failed build source image') from e
        self.log.debug("Build log:\n%s\n", output)
        # clean bsi temp directory
        bsi_temp_dir = os.path.join(os.getcwd(), 'SrcImg')
        if os.path.isdir(bsi_temp_dir):
            self.log.info('Will remove BSI temporary directory: %s', bsi_temp_dir)
            shutil.rmtree(bsi_temp_dir)
        # clean all downloaded sources
        if source_exists:
            self.log.info('Will remove directory with downloaded srpms: %s', source_data_dir)
            shutil.rmtree(source_data_dir)
        if remote_source_exists:
            self.log.info('Will remove directory with downloaded remote sources: %s',
                          remote_source_data_dir)
            shutil.rmtree(remote_source_data_dir)
        if maven_source_exists:
            self.log.info('Will remove directory with downloaded maven sources: %s',
                          maven_source_data_dir)
            shutil.rmtree(maven_source_data_dir)
        # Export to docker-archive, then drop the unpacked OCI directory.
        image_metadata = self.export_image(image_output_dir)
        self.log.info('Will remove unpacked image directory: %s', image_output_dir)
        shutil.rmtree(image_output_dir)
        return {
            'image_metadata': image_metadata,
            'logs': [output],
        }
|
{
"content_hash": "81c8635cbedce4ec8a9da8799b087ed8",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 99,
"avg_line_length": 41.1375,
"alnum_prop": 0.6338498936493467,
"repo_name": "fr34k8/atomic-reactor",
"id": "98ace23968663f8dcdbf3ffef16378ee2c4b7012",
"size": "6582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atomic_reactor/plugins/build_source_container.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1203"
},
{
"name": "Makefile",
"bytes": "868"
},
{
"name": "Python",
"bytes": "2045752"
},
{
"name": "Shell",
"bytes": "3892"
}
],
"symlink_target": ""
}
|
from past.builtins import basestring
import unittest
import types
import os
from vulndb import DBVuln
from vulndb.db_vuln import Reference
class TestLoadAllJSON(unittest.TestCase):
    """Smoke-test that every vulnerability JSON file in the DB parses cleanly."""
    # Show full diffs on assertion failure; the failed-file list can be long.
    maxDiff = None

    def test_from_file(self):
        """Load every JSON DB file for every language and type-check its fields."""
        failed_json_files = []
        processed_files = []
        for language in DBVuln.get_all_languages():
            json_path = os.path.join(DBVuln.DB_PATH, language)
            for _fname in os.listdir(json_path):
                _file_path = os.path.join(json_path, _fname)
                if os.path.isdir(_file_path):
                    continue
                try:
                    DBVuln.LANG = language
                    dbv = DBVuln.from_file(_file_path)
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Only real parse errors should
                # mark a file as failed.
                except Exception:
                    failed_json_files.append(_fname)
                    continue
                processed_files.append(_fname)
                self.assertIsInstance(dbv.title, basestring)
                self.assertIsInstance(dbv.description, basestring)
                self.assertIsInstance(dbv.id, int)
                self.assertIsInstance(dbv.severity, basestring)
                self.assertIsInstance(dbv.wasc, (type(None), list))
                self.assertIsInstance(dbv.tags, (type(None), list))
                self.assertIsInstance(dbv.cwe, (type(None), list))
                self.assertIsInstance(dbv.owasp_top_10, (type(None), dict))
                self.assertIsInstance(dbv.fix_effort, int)
                self.assertIsInstance(dbv.fix_guidance, basestring)
                for ref in dbv.references:
                    self.assertIsInstance(ref, Reference)
        self.assertEqual(failed_json_files, [])
        self.assertGreater(len(processed_files), 20)
|
{
"content_hash": "82c87da824536dc4186ef9a164aca55c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 75,
"avg_line_length": 32.75471698113208,
"alnum_prop": 0.5720046082949308,
"repo_name": "vulndb/python-sdk",
"id": "3f57c5ab7f08b4e02e6e08ad800b6ed2ce7019a1",
"size": "1736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vulndb/tests/test_load_all_json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25477"
},
{
"name": "Shell",
"bytes": "6206"
}
],
"symlink_target": ""
}
|
"""Holds the create_app() Flask application factory. More information in create_app() docstring."""
from importlib import import_module
import locale
import os
from flask import Flask
from yaml import safe_load
import github_status as app_root
from github_status.blueprints import all_blueprints
from github_status.extensions import db
APP_ROOT_FOLDER = os.path.abspath(os.path.dirname(app_root.__file__))
TEMPLATE_FOLDER = os.path.join(APP_ROOT_FOLDER, 'templates')
STATIC_FOLDER = os.path.join(APP_ROOT_FOLDER, 'static')
def get_config(config_class_string, yaml_files=None):
    """Load the Flask config from a class.

    Positional arguments:
    config_class_string -- string representation of a configuration class that will be loaded (e.g.
        'github_status.config.Production').
    yaml_files -- List of YAML files to load. This is for testing, leave None in dev/production.

    Returns:
    A class object to be fed into app.config.from_object().
    """
    config_module, config_class = config_class_string.rsplit('.', 1)
    config_class_object = getattr(import_module(config_module), config_class)
    config_obj = config_class_object()
    # Expand some options.
    db_fmt = 'github_status.models.{}'
    if getattr(config_obj, 'DB_MODELS_IMPORTS', False):
        config_obj.DB_MODELS_IMPORTS = [db_fmt.format(m) for m in config_obj.DB_MODELS_IMPORTS]
    # Load additional configuration settings from the first set of YAML files
    # that exist, later files overriding earlier ones.
    yaml_files = yaml_files or [f for f in [
        os.path.join('/', 'etc', 'github_status', 'config.yml'),
        os.path.abspath(os.path.join(APP_ROOT_FOLDER, '..', 'config.yml')),
        os.path.join(APP_ROOT_FOLDER, 'config.yml'),
    ] if os.path.exists(f)]
    additional_dict = dict()
    for y in yaml_files:
        with open(y) as f:
            # BUG FIX: safe_load() returns None for an empty YAML document,
            # which crashed dict.update(); treat empty files as no options.
            additional_dict.update(safe_load(f.read()) or {})
    # Merge the rest into the Flask app config.
    for key, value in additional_dict.items():
        setattr(config_obj, key, value)
    return config_obj
def create_app(config_obj):
    """Flask application factory. Initializes and returns the Flask application.

    Blueprints are registered in here.

    Modeled after: http://flask.pocoo.org/docs/patterns/appfactories/

    Positional arguments:
    config_obj -- configuration object to load into app.config.

    Returns:
    The initialized Flask application.
    """
    # Create the app and flatten config_obj into a plain dict (this resolves
    # any properties on the config object).
    application = Flask(__name__, template_folder=TEMPLATE_FOLDER, static_folder=STATIC_FOLDER)
    application.config.update(
        {k: getattr(config_obj, k) for k in dir(config_obj) if not k.startswith('_')})

    # Import DB models. Flask-SQLAlchemy doesn't do this automatically.
    with application.app_context():
        for module_path in application.config.get('DB_MODELS_IMPORTS', list()):
            import_module(module_path)

    # Set up redirects and register blueprints.
    application.add_url_rule(
        '/favicon.ico', 'favicon', lambda: application.send_static_file('favicon.ico'))
    for blueprint in all_blueprints:
        import_module(blueprint.import_name)
        application.register_blueprint(blueprint)

    # Initialize extensions/add-ons/plugins.
    db.init_app(application)

    # Activate middleware.
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')  # For filters inside the middleware file.
    with application.app_context():
        import_module('github_status.middleware')

    # Return the application instance.
    return application
|
{
"content_hash": "0e324dc710ef38064787c574e125f753",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 103,
"avg_line_length": 36.17894736842105,
"alnum_prop": 0.687809135874309,
"repo_name": "Robpol86/Flask-Large-App-Example-2",
"id": "b508b25a1503956418ef7c18bb5e8a3806ed5694",
"size": "3437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "github_status/application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36817"
},
{
"name": "JavaScript",
"bytes": "434957"
},
{
"name": "Python",
"bytes": "37154"
}
],
"symlink_target": ""
}
|
"""
MkdocsServer
This tool can be used to serve / preview the mkdocs output locally before publishing
"""
import os, sys, os.path as path
import SCons.Script
from SCons.Environment import Environment
from SCons.Script import Builder
# TODO always rebuild option needed?
# TODO fix relative imports when importing a single namespaced tool
from scons_tools_grbd.Tools.Docs.Mkdocs import MkdocsCommon
def exists(env):
    """Return a truthy value when the mkdocs tooling can be detected,
    i.e. it is safe to load this builder into the environment."""
    return MkdocsCommon.detect(env)
def generate(env):
    """Called by SCons when the tool is loaded into the environment at
    script startup; registers the MkdocsServer builder."""
    assert(exists(env))
    MkdocsCommon.setup_opts(env)
    # Scanner that reports the markdown sources the mkdocs config depends on.
    doc_scanner = env.Scanner(
        MkdocsCommon.MkdocsScanner,
        'MkdocsScanner',
    )
    server_builder = Builder(
        action=__MkdocsServer_func,
        emitter=MkdocsCommon.Mkdocs_emitter,
        source_scanner=doc_scanner,
    )
    env.Append(BUILDERS={'MkdocsServer': server_builder})
def __MkdocsServer_func(target, source, env):
    """Builder action: run `mkdocs serve` (blocking) using the option
    variables previously set up on the environment."""
    args = ['$Mkdocs', 'serve', '--config-file=' + str(source[0])]
    serve_addr = '127.0.0.1:8000'
    if env['Mkdocs_ServeUrl']:
        serve_addr = str(env['Mkdocs_ServeUrl'])
        args.append('--dev-addr=$Mkdocs_ServeUrl')
    # Simple truthy option variables map directly onto CLI flags.
    if env['Mkdocs_Strict']:
        args.append('--strict')
    if env['Mkdocs_Theme']:
        args.append('--theme=$Mkdocs_Theme')
    if env['Mkdocs_ThemeDir']:
        args.append('--theme-dir=$Mkdocs_ThemeDir')
    # Tri-state option: exactly True / exactly False / anything else keeps
    # mkdocs' own default behaviour.
    if env['Mkdocs_LiveReload'] == True:
        args.append('--livereload')
    elif env['Mkdocs_LiveReload'] == False:
        args.append('--no-livereload')
    if env['Mkdocs_DirtyReload'] == True:
        args.append('--dirtyreload')
    if env['Mkdocs_Quiet']:
        args.append('--quiet')
    if env['Mkdocs_Verbose']:
        args.append('--verbose')
    args = args + env['Mkdocs_ExtraArgs']
    print('Starting MkDocs Server http://' + serve_addr)
    env.Execute(env.Action([args], chdir=env['Mkdocs_WorkingDir']))
    print("Server Closed.")
|
{
"content_hash": "a53c907924236ba9ca515a787d957888",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 84,
"avg_line_length": 33.72727272727273,
"alnum_prop": 0.6415094339622641,
"repo_name": "ASoftTech/Scons-Tools-Grbd",
"id": "091377291979389de6e5933bbc236b98cc052116",
"size": "2226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scons_tools_grbd/Tools/Docs/Mkdocs/MkdocsServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33"
},
{
"name": "CSS",
"bytes": "3966"
},
{
"name": "HTML",
"bytes": "11346"
},
{
"name": "Python",
"bytes": "67078"
}
],
"symlink_target": ""
}
|
"""SQLAlchemy storage backend."""
from __future__ import absolute_import
import datetime
import os
from oslo_config import cfg
from oslo_db import exception as dbexc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from oslo_utils import timeutils
import sqlalchemy as sa
from ceilometer.event.storage import base
from ceilometer.event.storage import models as api_models
from ceilometer.i18n import _LE, _LI
from ceilometer.storage.sqlalchemy import models
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
# Ordered mapping from API trait-type id to the SQLAlchemy model that stores
# that kind of trait value.
TRAIT_MAPLIST = [(api_models.Trait.NONE_TYPE, models.TraitText),
                 (api_models.Trait.TEXT_TYPE, models.TraitText),
                 (api_models.Trait.INT_TYPE, models.TraitInt),
                 (api_models.Trait.FLOAT_TYPE, models.TraitFloat),
                 (api_models.Trait.DATETIME_TYPE, models.TraitDatetime)]

# Forward/reverse lookup tables derived from TRAIT_MAPLIST. Both NONE_TYPE
# and TEXT_TYPE use TraitText, so the reverse table keeps the last pair seen
# for a model (TraitText -> TEXT_TYPE), matching the original dict((y, x)...)
# construction order.
TRAIT_ID_TO_MODEL = dict(TRAIT_MAPLIST)
TRAIT_MODEL_TO_ID = {model: trait_id for trait_id, model in TRAIT_MAPLIST}

trait_models_dict = {'string': models.TraitText,
                     'integer': models.TraitInt,
                     'datetime': models.TraitDatetime,
                     'float': models.TraitFloat}
def _build_trait_query(session, trait_type, key, value, op='eq'):
    """Build a query selecting event ids whose trait matches a condition.

    :param session: SQLAlchemy session to query against.
    :param trait_type: external trait type name ('string', 'integer', ...).
    :param key: trait name to match.
    :param value: value to compare the trait against.
    :param op: comparison operator name; one of eq/lt/le/gt/ge/ne.
    :return: (query yielding event ids labelled 'ev_id', trait model class)
    """
    model = trait_models_dict[trait_type]
    # All comparison expressions are built eagerly; only the selected one
    # is attached to the query below.
    comparators = {
        'eq': model.value == value,
        'lt': model.value < value,
        'le': model.value <= value,
        'gt': model.value > value,
        'ge': model.value >= value,
        'ne': model.value != value,
    }
    query = (session.query(model.event_id.label('ev_id'))
             .filter(model.key == key, comparators[op]))
    return query, model
class Connection(base.Connection):
    """Put the event data into a SQLAlchemy database.

    Tables::

        - EventType
          - event definition
          - { id: event type id
              desc: description of event
              }
        - Event
          - event data
          - { id: event id
              message_id: message id
              generated = timestamp of event
              event_type_id = event type -> eventtype.id
              }
        - TraitInt
          - int trait value
          - { event_id: event -> event.id
              key: trait name
              value: integer value
              }
        - TraitDatetime
          - datetime trait value
          - { event_id: event -> event.id
              key: trait name
              value: datetime value
              }
        - TraitText
          - text trait value
          - { event_id: event -> event.id
              key: trait name
              value: text value
              }
        - TraitFloat
          - float trait value
          - { event_id: event -> event.id
              key: trait name
              value: float value
              }
    """
    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )
    def __init__(self, url):
        """Create the engine facade and pick an isolation level.

        :param url: database connection URL handed to oslo.db.
        """
        # Set max_retries to 0, since oslo.db in certain cases may attempt
        # to retry making the db connection retried max_retries ^ 2 times
        # in failure case and db reconnection has already been implemented
        # in storage.__init__.get_connection_from_config function
        options = dict(cfg.CONF.database.items())
        options['max_retries'] = 0
        self._engine_facade = db_session.EngineFacade(url, **options)
        # SQLite does not support REPEATABLE READ; fall back to SERIALIZABLE.
        if self._engine_facade.get_engine().name == 'sqlite':
            self.isolation_level = 'SERIALIZABLE'
        else:
            self.isolation_level = 'REPEATABLE READ'
    def upgrade(self):
        """Migrate the database schema to the current version."""
        # NOTE(gordc): to minimise memory, only import migration when needed
        from oslo_db.sqlalchemy import migration
        path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                            '..', '..', 'storage', 'sqlalchemy',
                            'migrate_repo')
        migration.db_sync(self._engine_facade.get_engine(), path)
    def clear(self):
        """Delete all rows from every table and dispose of the engine."""
        engine = self._engine_facade.get_engine()
        # Reverse sorted order so children are emptied before their parents.
        for table in reversed(models.Base.metadata.sorted_tables):
            engine.execute(table.delete())
        engine.dispose()
    def _get_or_create_event_type(self, event_type, session=None):
        """Check if an event type with the supplied name already exists.

        If not, we create it and return the record. This may result in a flush.

        :param event_type: event type name (string).
        :param session: optional session to reuse; a new one is taken from
            the engine facade when omitted.
        """
        if session is None:
            session = self._engine_facade.get_session()
        with session.begin(subtransactions=True):
            et = session.query(models.EventType).filter(
                models.EventType.desc == event_type).first()
            if not et:
                et = models.EventType(event_type)
                session.add(et)
        return et
    def record_events(self, event_models):
        """Write the events to SQL database via sqlalchemy.

        :param event_models: a list of model.Event objects.
        :raises Exception: re-raises the last non-duplicate, non-KeyError
            failure after attempting every event.
        """
        session = self._engine_facade.get_session()
        error = None
        for event_model in event_models:
            event = None
            try:
                with session.begin():
                    event_type = self._get_or_create_event_type(
                        event_model.event_type, session=session)
                    event = models.Event(event_model.message_id, event_type,
                                         event_model.generated,
                                         event_model.raw)
                    session.add(event)
                    # Flush so event.id is assigned for the trait rows below.
                    session.flush()
                    if event_model.traits:
                        # Group trait rows per dtype so each trait table gets
                        # a single bulk INSERT.
                        trait_map = {}
                        for trait in event_model.traits:
                            if trait_map.get(trait.dtype) is None:
                                trait_map[trait.dtype] = []
                            trait_map[trait.dtype].append(
                                {'event_id': event.id,
                                 'key': trait.name,
                                 'value': trait.value})
                        for dtype in trait_map.keys():
                            model = TRAIT_ID_TO_MODEL[dtype]
                            session.execute(model.__table__.insert(),
                                            trait_map[dtype])
            except dbexc.DBDuplicateEntry as e:
                # Duplicate message_id: already recorded, safe to skip.
                LOG.info(_LI("Duplicate event detected, skipping it: %s") % e)
            except KeyError as e:
                LOG.exception(_LE('Failed to record event: %s') % e)
            except Exception as e:
                LOG.exception(_LE('Failed to record event: %s') % e)
                error = e
        if error:
            raise error
    def get_events(self, event_filter, limit=None):
        """Return an iterable of model.Event objects.

        :param event_filter: EventFilter instance
        :param limit: maximum number of events to return; 0 yields nothing.
        """
        if limit == 0:
            return
        session = self._engine_facade.get_session()
        with session.begin():
            # Run the whole read at the isolation level chosen in __init__.
            session.connection(
                execution_options={'isolation_level': self.isolation_level})
            # Build up the join conditions
            event_join_conditions = [models.EventType.id ==
                                     models.Event.event_type_id]
            if event_filter.event_type:
                event_join_conditions.append(models.EventType.desc ==
                                             event_filter.event_type)
            # Build up the where conditions
            event_filter_conditions = []
            if event_filter.message_id:
                event_filter_conditions.append(
                    models.Event.message_id == event_filter.message_id)
            if event_filter.start_timestamp:
                event_filter_conditions.append(
                    models.Event.generated >= event_filter.start_timestamp)
            if event_filter.end_timestamp:
                event_filter_conditions.append(
                    models.Event.generated <= event_filter.end_timestamp)
            trait_subq = None
            # Build trait filter: the first filter seeds the subquery, every
            # additional one is ANDed in via a correlated EXISTS.
            if event_filter.traits_filter:
                filters = list(event_filter.traits_filter)
                trait_filter = filters.pop()
                key = trait_filter.pop('key')
                op = trait_filter.pop('op', 'eq')
                trait_type, value = list(trait_filter.items())[0]
                trait_subq, t_model = _build_trait_query(session, trait_type,
                                                         key, value, op)
                for trait_filter in filters:
                    key = trait_filter.pop('key')
                    op = trait_filter.pop('op', 'eq')
                    trait_type, value = list(trait_filter.items())[0]
                    q, model = _build_trait_query(session, trait_type,
                                                  key, value, op)
                    trait_subq = trait_subq.filter(
                        q.filter(model.event_id == t_model.event_id).exists())
                trait_subq = trait_subq.subquery()
            query = (session.query(models.Event.id)
                     .join(models.EventType,
                           sa.and_(*event_join_conditions)))
            if trait_subq is not None:
                query = query.join(trait_subq,
                                   trait_subq.c.ev_id == models.Event.id)
            if event_filter_conditions:
                query = query.filter(sa.and_(*event_filter_conditions))
            query = query.order_by(models.Event.generated).limit(limit)
            event_list = {}
            # get a list of all events that match filters
            for (id_, generated, message_id,
                 desc, raw) in query.add_columns(
                     models.Event.generated, models.Event.message_id,
                     models.EventType.desc, models.Event.raw).all():
                event_list[id_] = api_models.Event(message_id, desc,
                                                   generated, [], raw)
            # Query all traits related to events.
            # NOTE (gordc): cast is done because pgsql defaults to TEXT when
            # handling unknown values such as null.
            # Each branch of the UNION pads unused value columns with NULLs
            # so all four trait tables share one (id, key, date, int, float,
            # text) row shape.
            trait_q = (
                session.query(
                    models.TraitDatetime.event_id,
                    models.TraitDatetime.key, models.TraitDatetime.value,
                    sa.cast(sa.null(), sa.Integer),
                    sa.cast(sa.null(), sa.Float(53)),
                    sa.cast(sa.null(), sa.String(255)))
                .filter(sa.exists().where(
                    models.TraitDatetime.event_id == query.subquery().c.id))
            ).union_all(
                session.query(
                    models.TraitInt.event_id,
                    models.TraitInt.key, sa.null(),
                    models.TraitInt.value, sa.null(), sa.null())
                .filter(sa.exists().where(
                    models.TraitInt.event_id == query.subquery().c.id)),
                session.query(
                    models.TraitFloat.event_id,
                    models.TraitFloat.key, sa.null(), sa.null(),
                    models.TraitFloat.value, sa.null())
                .filter(sa.exists().where(
                    models.TraitFloat.event_id == query.subquery().c.id)),
                session.query(
                    models.TraitText.event_id,
                    models.TraitText.key, sa.null(), sa.null(), sa.null(),
                    models.TraitText.value)
                .filter(sa.exists().where(
                    models.TraitText.event_id == query.subquery().c.id)))
            # The populated value column identifies the trait's dtype.
            for id_, key, t_date, t_int, t_float, t_text in (
                    trait_q.order_by(models.TraitDatetime.key)).all():
                if t_int is not None:
                    dtype = api_models.Trait.INT_TYPE
                    val = t_int
                elif t_float is not None:
                    dtype = api_models.Trait.FLOAT_TYPE
                    val = t_float
                elif t_date is not None:
                    dtype = api_models.Trait.DATETIME_TYPE
                    val = t_date
                else:
                    dtype = api_models.Trait.TEXT_TYPE
                    val = t_text
                try:
                    trait_model = api_models.Trait(key, dtype, val)
                    event_list[id_].append_trait(trait_model)
                except KeyError:
                    # NOTE(review): presumably guards against trait rows whose
                    # event id is missing from event_list -- confirm.
                    LOG.warning('Trait key: %(key)s, val: %(val)s, for event: '
                                '%(event)s not valid.' %
                                {'key': key, 'val': val, 'event': id_})
            return event_list.values()
    def get_event_types(self):
        """Return all event types as an iterable of strings."""
        session = self._engine_facade.get_session()
        with session.begin():
            query = (session.query(models.EventType.desc).
                     order_by(models.EventType.desc))
            for name in query.all():
                # The query returns a tuple with one element.
                yield name[0]
    def get_trait_types(self, event_type):
        """Return a dictionary containing the name and data type of the trait.

        Only trait types for the provided event_type are returned.

        :param event_type: the type of the Event
        """
        session = self._engine_facade.get_session()
        with session.begin():
            # One DISTINCT key query per trait table, tagged with its dtype.
            for trait_model in [models.TraitText, models.TraitInt,
                                models.TraitFloat, models.TraitDatetime]:
                query = (session.query(trait_model.key)
                         .join(models.Event,
                               models.Event.id == trait_model.event_id)
                         .join(models.EventType,
                               sa.and_(models.EventType.id ==
                                       models.Event.event_type_id,
                                       models.EventType.desc == event_type))
                         .distinct())
                dtype = TRAIT_MODEL_TO_ID.get(trait_model)
                for row in query.all():
                    yield {'name': row[0], 'data_type': dtype}
    def get_traits(self, event_type, trait_type=None):
        """Return all trait instances associated with an event_type.

        If trait_type is specified, only return instances of that trait type.

        :param event_type: the type of the Event to filter by
        :param trait_type: the name of the Trait to filter by
        """
        session = self._engine_facade.get_session()
        with session.begin():
            for trait_model in [models.TraitText, models.TraitInt,
                                models.TraitFloat, models.TraitDatetime]:
                query = (session.query(trait_model.key, trait_model.value)
                         .join(models.Event,
                               models.Event.id == trait_model.event_id)
                         .join(models.EventType,
                               sa.and_(models.EventType.id ==
                                       models.Event.event_type_id,
                                       models.EventType.desc == event_type))
                         .order_by(trait_model.key))
                if trait_type:
                    query = query.filter(trait_model.key == trait_type)
                dtype = TRAIT_MODEL_TO_ID.get(trait_model)
                for k, v in query.all():
                    yield api_models.Trait(name=k,
                                           dtype=dtype,
                                           value=v)
    def clear_expired_event_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            event_q = (session.query(models.Event.id)
                       .filter(models.Event.generated < end))
            event_subq = event_q.subquery()
            # Delete trait rows first so no trait outlives its event.
            for trait_model in [models.TraitText, models.TraitInt,
                                models.TraitFloat, models.TraitDatetime]:
                (session.query(trait_model)
                 .filter(trait_model.event_id.in_(event_subq))
                 .delete(synchronize_session="fetch"))
            event_rows = event_q.delete()
            # remove EventType and TraitType with no corresponding
            # matching events and traits
            (session.query(models.EventType)
             .filter(~models.EventType.events.any())
             .delete(synchronize_session="fetch"))
            LOG.info(_LI("%d events are removed from database"), event_rows)
|
{
"content_hash": "fbec1f0172f97dc57d7474d29cde6a21",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 79,
"avg_line_length": 42.03864734299517,
"alnum_prop": 0.5145368880717076,
"repo_name": "mathslinux/ceilometer",
"id": "a34831a56a9322a11698e58a743d50da707a694f",
"size": "17952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/event/storage/impl_sqlalchemy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2849027"
},
{
"name": "Shell",
"bytes": "29510"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Initial migration: creates the Company model for the company app."""
    # First migration for this app.
    initial = True
    dependencies = [
        # The user FK below targets whichever model AUTH_USER_MODEL names.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('display_name', models.CharField(max_length=255)),
                ('address_line_1', models.CharField(max_length=255)),
                ('address_line_2', models.CharField(max_length=255)),
                ('full_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('phone', models.CharField(max_length=255)),
                ('website', models.CharField(max_length=255)),
                ('ideal_customer', models.IntegerField(choices=[(0, 'Individuals'), (1, 'Small Businesses'), (2, 'Large/Industrial Businesses')], default=0)),
                ('is_cad_assistance', models.BooleanField(default=False)),
                ('budget', models.IntegerField(choices=[(0, 'Under $100'), (1, '$100 - 500'), (2, '$500 - 2,500'), (3, '$2,500+')], default=0)),
                ('basic_material', models.IntegerField(choices=[(0, 'Plastic / Resin'), (1, 'Metal'), (2, 'Stone'), (3, 'Other')], default=0)),
                ('consideration', models.IntegerField(choices=[(0, 'Speed'), (1, 'Accuracy'), (2, 'Cost')], default=0)),
                # MultiSelectField stores the chosen codes as one
                # comma-separated string (hence max_length=13).
                ('printing_options', multiselectfield.db.fields.MultiSelectField(choices=[(0, 'FDM (Fused Deposition Modeling)'), (1, 'SLA (Sterolithography)'), (2, 'DLP (Digital Light Processing)'), (3, 'SLS (Selective Laser Sintering)'), (4, 'Material Jetting (PolyJet / MultiJet)'), (5, 'BJ (Binderjetting)'), (6, 'EBM (Electron Beam Melting)')], max_length=13)),
                ('material', models.IntegerField(blank=True, choices=[(0, 'PLA (Polylactide)'), (1, 'ABS (Acrylonitrile Butadiene Styrene)'), (2, 'ABS-Like'), (3, 'PETG (Polyethylene Terephthalate)'), (4, 'TPE (Thermoplastic Elastomers)'), (5, 'PC (Polycarbonate)'), (6, 'Nylon'), (7, 'Reinforced Nylon'), (8, 'Sandstone'), (9, 'Stainless Steel'), (10, 'Titanium'), (11, 'Aluminum')], null=True)),
                ('top_printing_processes', multiselectfield.db.fields.MultiSelectField(choices=[(0, 'FDM (Fused Deposition Modeling)'), (1, 'SLA (Sterolithography)'), (2, 'DLP (Digital Light Processing)'), (3, 'SLS (Selective Laser Sintering)'), (4, 'Material Jetting (PolyJet / MultiJet)'), (5, 'BJ (Binderjetting)'), (6, 'EBM (Electron Beam Melting)')], max_length=13)),
                ('description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
{
"content_hash": "19be97bfafeacbf1cddfafe4d9c95ed2",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 397,
"avg_line_length": 72.81395348837209,
"alnum_prop": 0.6065154902587033,
"repo_name": "hqpr/findyour3d",
"id": "6886c454e6f051ae852c87820391cec3e462c1d1",
"size": "3204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "findyour3d/company/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "679650"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "HTML",
"bytes": "197307"
},
{
"name": "JavaScript",
"bytes": "471026"
},
{
"name": "Python",
"bytes": "230763"
},
{
"name": "Shell",
"bytes": "4684"
}
],
"symlink_target": ""
}
|
class AbstractProvider:
    """Base class for profile key storage providers.

    Providers persist and retrieve encryption profiles at several nested
    levels:

    * root: defaults for totem across all clusters
    * cluster: defaults for a particular cluster
    * organization: defaults for a particular organization
    * repository: defaults for a particular repository
    * ref: defaults for a particular tag or branch

    Concrete implementations (such as S3) must support this multi level
    layout.
    """

    def not_supported(self):
        """Raise NotImplementedError naming this provider class.

        :return: never returns normally
        """
        detail = 'Provider: %s does not support this operation'
        raise NotImplementedError(detail % self.__class__)

    def load(self, profile, public=True):
        """Load the key for the given profile.

        :param profile: String defining encryption profile (or key id)
        :type profile: str
        :keyword public: load the public key when True, the private key
            otherwise.
        :type public: bool
        :return: Base64 encoded key
        :rtype: str
        :raise NotImplementedError: If provider does not support this method.
        """
        self.not_supported()

    def write(self, profile, data, public=True):
        """Store the key for the given profile.

        :param profile: String defining encryption profile (or key id)
        :type profile: str
        :param data: Base64 encoded key
        :keyword public: write the public key when True, the private key
            otherwise.
        :type public: bool
        :return: None
        :raise NotImplementedError: If provider does not support this method.
        """
        self.not_supported()

    def delete(self, profile):
        """Remove both the public and private keys for the profile.

        :param profile: String defining encryption profile (or key id)
        :type profile: str
        :return: None
        :raise NotImplementedError: If provider does not support this method.
        """
        self.not_supported()
|
{
"content_hash": "11f3e4b57f1fbb23f801fc0b18b5ed44",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 33.868852459016395,
"alnum_prop": 0.6393998063891578,
"repo_name": "totem/totem-encrypt",
"id": "268a12683f7a546f0a24a9ecb771d1f521f274f9",
"size": "2068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "encryption/store/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22814"
}
],
"symlink_target": ""
}
|
"""JUNOS link down test
Run: python -m unittest tests.test_spotmax
"""
import unittest
from spotmax import SpotMAX, SPOTGroup
from collections import defaultdict
class MockNetworkDevice(object):
    """Stand-in for a network device with canned facts, interfaces and MACs."""
    def __init__(self, asset, loopback=None, groups='', username=None, password=None, ssh_keyfile=None):
        # DEVICE1_FACTS / DEVICE2_FACTS are defined elsewhere in this test
        # module -- loopback '1' selects the first device's facts,
        # anything else the second's.
        if loopback == '1':
            self.facts = DEVICE1_FACTS
        else:
            self.facts = DEVICE2_FACTS
        # One enabled-but-down interface.
        self.interfaces = {'ge-0/0/6': {u'description': u'',
                                        u'is_enabled': True,
                                        u'is_up': False,
                                        u'last_flapped': -1.0,
                                        u'mac_address': u'54:E0:32:30:87:09',
                                        u'speed': -1}}
        # MAC table entries: two dynamic entries on VLAN 201 plus the
        # router's own static entry on VLAN 0.
        self.macs = [{u'active': True,
                      u'interface': u'ge-0/0/0.0',
                      u'last_move': 0.0,
                      u'mac': u'B8:27:EB:04:FF:A2',
                      u'moves': 0,
                      u'static': False,
                      u'vlan': 201},
                     {u'active': True,
                      u'interface': u'xe-0/1/0.0',
                      u'last_move': 0.0,
                      u'mac': u'F8:C0:01:38:4F:38',
                      u'moves': 0,
                      u'static': False,
                      u'vlan': 201},
                     {u'active': True,
                      u'interface': u'Router',
                      u'last_move': 0.0,
                      u'mac': u'54:E0:32:30:87:01',
                      u'moves': 0,
                      u'static': True,
                      u'vlan': 0}]
        # No VLANs or LLDP neighbours by default.
        self.vlans = list()
        self.lldp = dict()
class MockData(object):
    """In-memory stand-in for the asset/group data store used by the mocks.

    Records are plain dicts stored under their 'asset' or 'group' name.
    """

    def __init__(self):
        self.data = defaultdict()

    def add(self, query):
        """Store *query* under its 'asset' name, or 'group' as a fallback."""
        record_key = query.get('asset') or query.get('group')
        if record_key:
            self.data[record_key] = query

    def get(self, key):
        """Return a MockCursor holding the record for *key* (empty if absent)."""
        cursor = MockCursor()
        found = self.data.get(key, None)
        if found:
            cursor.add_data(found)
        return cursor

    def delete(self, key):
        """Remove the record stored under *key*; raises KeyError if missing."""
        self.data.pop(key)

    def __getitem__(self, asset):
        """Dict-style access; unknown keys yield an empty dict."""
        return self.data.get(asset, {})
class MockCursor(object):
    """Mocks a MongoDB cursor object: a list of items plus result counters."""

    def __init__(self):
        self.deleted_count = 0
        self.modified_count = 0
        self.matched_count = 0
        self.data_count = 0
        self.data = []

    def add_data(self, data=None):
        """Append one item to the cursor and update the item counter."""
        self.data.append(data)
        self.data_count = len(self.data)

    def count(self):
        """Number of items held by this cursor."""
        return self.data_count

    def get(self, key):
        """Return data or '' if not found.

        NOTE(review): self.data is a list, which has no .get method, so
        calling this raises AttributeError; kept as-is to preserve the
        original behaviour.
        """
        return self.data.get(key, '')

    def limit(self, limit):
        """The mock ignores limits; returns self so calls chain."""
        return self

    def skip(self, limit):
        """The mock ignores skips; returns self so calls chain."""
        return self

    def __getitem__(self, index):
        """Index into the cursor, returning the first element of the entry."""
        return self.data[index][0]
class MockCollection(object):
    """Mocks a MongoDB collection on top of MockData / MockCursor.

    NOTE(review): several methods index dict .keys() directly
    (``.keys()[0]``), which only works on Python 2.
    """
    def __init__(self):
        self.data = MockData()
    def _get_key(self, query):
        """Get key, either 'asset' or 'group'"""
        # Get asset name
        key = query.get('asset')
        # If not key, check if this is a group query
        if not key:
            key = query.get('group')
        return key
    def find(self, query):
        """Return a MockCursor with the matching record, empty if no match."""
        # Get asset name
        key = query.get('asset')
        # If not key, check if this is a group query
        if not key:
            key = query.get('group')
        # Check if asset is empty and if query is in "or" form
        # Eg. {'$or': [{'asset': {'$regex': 'test_asset'}}, {'interfaces.asset': {'$regex': 'test_asset'}}, {'macs.asset': {'$regex': 'test_asset'}}]}
        if not key and '$or' in query.keys():
            key = query['$or'][0]['asset']['$regex']
        # If asset is set return MockCursor with the entry
        # Otherwise return empty MockCursor
        if key:
            cursor = MockCursor()
            # Find asset
            if self.data.get(key).data:
                cursor.add_data(self.data.get(key).data)
            return cursor
        else:
            return MockCursor()
    def find_and_modify(self, query=None, update=None, upsert=None):
        """Add each update variable; None if the exact value already exists."""
        key = self._get_key(query)
        for updates in update:
            for variable in update[updates]:
                # Variable name is the third dot-separated component,
                # e.g. 'variables.0.NAME' -> 'NAME'.
                name = variable.split('.')[2]
                value = update[updates][variable]
                data = {name: value}
                if data in self.data[key].get('variables', []):
                    return None
                else:
                    # Add variables if missing
                    if not self.data[key].get('variables'):
                        self.data[key]['variables'] = list()
                    self.data[key]['variables'].append(data)
        return True
    def find_one(self, query):
        """Return the stored record, or an empty MockData when not found."""
        key = self._get_key(query)
        # NOTE(review): MockData.get returns a MockCursor, which defines no
        # __bool__/__len__, so this condition is always true; the fallback
        # then comes from MockData.__getitem__ returning {}.
        if self.data.get(key):
            return self.data[key]
        else:
            return MockData()
    def update_one(self, query=None, update=None):
        """Apply a $push/$pull/$set update and return a result cursor."""
        key = self._get_key(query)
        cursor = MockCursor()
        # Push / add action
        if update.get('$push'):
            # NOTE(review): .keys()[0] is Python-2-only indexing.
            name = update['$push']['variables'].keys()[0]
            value = update['$push']['variables'][name]
            self.data[key]['variables'].append({name: value})
        # Remove action
        elif update.get('$pull'):
            name = update['$pull']['variables'].keys()[0]
            value = update['$pull']['variables'][name]
            self.data[key]['variables'].remove({name: value})
            # Set modified
            cursor.modified_count = 1
        elif update.get('$set'):
            for data_key in update['$set'].keys():
                self.data.data[key][data_key] = update['$set'][data_key]
            cursor.matched_count = 1
        # Return cursor
        return cursor
    def delete_one(self, query=None):
        """Delete the record named by query['asset'] if present."""
        asset = query['asset']
        cursor = MockCursor()
        # Delete data if found. Otherwise return empty cursor
        if self.data.get(asset).count() > 0:
            self.data.delete(asset)
            cursor.deleted_count = 1
        return cursor
class TestSPOTMAX(unittest.TestCase):
    """Unit tests for SpotMAX asset/variable handling, backed by the mocks."""
    def setUp(self):
        # Replace the real MongoDB collection with the in-memory mock.
        self.sm = SpotMAX(None, None)
        self.sm.collection = MockCollection()
    def test_add_variable(self):
        # Asset does not exist
        self.assertFalse(self.sm.add_variable('asset', 'test_asset', 'TEST_VAR:TEST_VALUE'))
        # Add asset
        self.sm.collection.data.add({'asset': 'test_asset', 'variables': []})
        # Test adding new variable
        self.assertTrue(self.sm.add_variable('asset', 'test_asset', 'TEST_VAR:NEW_VALUE'))
        # Test modifying new variable
        self.assertTrue(self.sm.add_variable('asset', 'test_asset', 'TEST_VAR:MOD_VALUE'))
    def test_delete(self):
        asset = 'test_asset'
        # Delete non-existing asset
        self.assertFalse(self.sm.delete(asset, key='asset'))
        # Add asset and test
        self.sm.collection.data.add({'asset': 'test_asset', 'variables': []})
        self.assertTrue(self.sm.delete(asset, key='asset'))
    def test_delete_variable(self):
        asset = 'test_asset'
        variable = 'TEST_VAR:TEST_VALUE'
        # Asset doesn't exist
        self.assertFalse(self.sm.delete_variable(asset, variable, 'asset'))
        # Add asset and variable
        self.sm.collection.data.add({'asset': 'test_asset', 'variables': []})
        self.sm.add_variable('asset', 'test_asset', variable)
        self.assertTrue(self.sm.delete_variable(asset, 'TEST_VAR', 'asset'))
    def test_exist(self):
        asset = 'test_asset'
        # Non-existing asset
        self.assertFalse(self.sm._exist(asset, key='asset'))
        # Add asset and test
        self.sm.collection.data.add({'asset': 'test_asset', 'variables': []})
        self.assertTrue(self.sm._exist(asset, key='asset'))
    def test_parse_search_term(self):
        # Asset name
        self.assertEqual((None, 'router_name'), SpotMAX().parse_variable('router_name'))
        self.assertNotEqual((None, 'router_name'), SpotMAX().parse_variable('router_name2'))
        # Key and value search
        self.assertEqual(('group', 'white'), SpotMAX().parse_variable('group:white'))
        self.assertEqual(('group', 'white:blue'), SpotMAX().parse_variable('group:white:blue'))
        # MAC search values are upper-cased by the parser.
        self.assertEqual(('mac', 'B0:A8:6E:0C:5C:2B'), SpotMAX().parse_variable('mac:b0:a8:6e:0c:5c:2b'))
        self.assertEqual(('interface', 'ge-0/0/12'), SpotMAX().parse_variable('interface:ge-0/0/12'))
        self.assertEqual(('version', '14.1X53-D40.8'), SpotMAX().parse_variable('version:14.1X53-D40.8'))
        # Misspelled key value search
        self.assertEqual(('group', ':white'), SpotMAX().parse_variable('group::white'))
    def test_search(self):
        asset = 'test_asset'
        # Search empty database
        self.assertEqual(0, self.sm.search(asset).count())
        # Add asset
        self.sm.collection.data.add({'asset': 'test_asset', 'variables': []})
        self.assertEqual([{'variables': [], 'asset': 'test_asset'}], self.sm.search(asset).data[0])
        # Add variable
        self.assertTrue(self.sm.add_variable('asset', 'test_asset', 'TEST_VAR:NEW_VALUE'))
        self.assertEqual([{'variables': [{'TEST_VAR': 'NEW_VALUE'}], 'asset': 'test_asset'}],
                         self.sm.search(asset).data[0])
        # Test non-existing asset again
        self.assertEqual(0, self.sm.search('empty_again').count())
class TestSPOTGroup(unittest.TestCase):
    """Unit tests for SPOTGroup group/variable handling, backed by the mocks."""
    def setUp(self):
        #netspot.NetworkDevice = MockNetworkDevice
        # Setup mock SpotMAX and collection
        self.ns = SPOTGroup(database=None, collection=None)
        self.ns.collection = MockCollection()
    def test_add_group(self):
        # Add new group
        self.assertTrue(self.ns.add_group('test_group'))
        # Already exist
        self.assertFalse(self.ns.add_group('test_group'))
    def test_delete_variable(self):
        group = 'test_group'
        variable = 'TEST_VAR:TEST_VALUE'
        # Add group and test variable
        self.assertTrue(self.ns.add_group(group))
        self.assertTrue(self.ns.add_variable('group', group, variable))
        # Delete variable
        self.assertTrue(self.ns.delete_variable(group, 'TEST_VAR', 'group'))
    def test_get_variables(self):
        group = 'test_group'
        variable = 'TEST_VAR:TEST_VALUE'
        variable2 = 'TEST_VAR2:TEST_VALUE2'
        # Add group and test variable
        self.assertTrue(self.ns.add_group(group))
        self.assertTrue(self.ns.add_variable('group', group, variable))
        # Get variables
        variables = self.ns.get_variables(group)
        self.assertEqual(1, len(variables))
        self.assertEqual(['TEST_VAR'], variables[0].keys())
        self.assertEqual('TEST_VALUE', variables[0]['TEST_VAR'])
        # Add another variable
        self.assertTrue(self.ns.add_variable('group', group, variable2))
        variables = self.ns.get_variables(group)
        self.assertEqual(2, len(variables))
# Allow running the tests directly: python -m unittest tests.test_spotmax
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "ee2deabb1d36ed18a88b064e48fc0479",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 146,
"avg_line_length": 29.673239436619717,
"alnum_prop": 0.5976836909056389,
"repo_name": "MaxIV-KitsControls/netspot",
"id": "69c6a1ed61ec79b4a0e871a9dff59573ecdd98f0",
"size": "10556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netspot/lib/spotmax/tests/test_spotmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77841"
},
{
"name": "HTML",
"bytes": "43477"
},
{
"name": "JavaScript",
"bytes": "114144"
},
{
"name": "Python",
"bytes": "213676"
}
],
"symlink_target": ""
}
|
import unittest
import converter
class ConverterTestCase(unittest.TestCase):
    """Tests for converter.convert(from_currency, to_currency, amount)."""

    def test_usd(self):
        # Converting a currency to itself is the identity (case-insensitive).
        usd = converter.convert('usd', 'USD', 1.00)
        self.assertEqual(usd, 1.0)

    def test_jpy(self):
        # A live conversion returns a unicode string whose value depends on
        # the current rate, so only the type and "not the error sentinel"
        # can be pinned here.
        jpy = converter.convert('usd', 'jpy', 1.00)
        self.assertEqual(type(u'1.0'), type(jpy))
        self.assertNotEqual(jpy, u'0.000')

    def test_dummy_currency(self):
        # Unknown currencies fall back to the sentinel u'0.000'.
        foo = converter.convert('usd', 'foo', 1.00)
        self.assertEqual(foo, u'0.000')
# Allow running these tests directly as a script.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "dc0575b16fbb9ba2dbb87b697f4433c5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 51,
"avg_line_length": 27.75,
"alnum_prop": 0.5891891891891892,
"repo_name": "brianriley/python-currency",
"id": "2c2acda10c819d6315975275421515b673b4ad00",
"size": "555",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1330"
}
],
"symlink_target": ""
}
|
"""Exceptions for database connectors."""
class ConnectorError(Exception):
    """Base error for all database connector failures."""
class DumpError(ConnectorError):
    """Raised when dumping a database fails."""
class RestoreError(ConnectorError):
    """Raised when restoring a database backup fails."""
class CommandConnectorError(ConnectorError):
    """Raised when an external connector command fails."""
|
{
"content_hash": "8bd3b2f6aaca6e6744b367ea5ea21364",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 44,
"avg_line_length": 20.2,
"alnum_prop": 0.7095709570957096,
"repo_name": "ZuluPro/django-dbbackup",
"id": "f22373060780d5f2bd2851154bc10a575c084246",
"size": "303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dbbackup/db/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "493"
},
{
"name": "Python",
"bytes": "159735"
},
{
"name": "Shell",
"bytes": "2875"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
import logging
import re
import xml.sax
import json
from google.appengine.ext import ndb
from google.appengine.api import taskqueue
from ffe.gae import increase_counter
from ffe.ffe_time import cet_from_string, string_from_cet, utc_from_cet
from ffe.markup import XMLImporter
from TABasics import task_name
class StopStatuses:
    """Integer codes for the lifecycle states of a stop.

    The name of a code can be looked up via ``StopStatuses.s[code]``.
    """
    # State names, indexed by their integer code.
    s = ['announced', 'extra', 'canceled', 'altDestination',
         'finalDestination', 'planned', 'revoked']
    announced = 0
    extra = 1
    canceled = 2
    altDestination = 3
    finalDestination = 4
    planned = 5
    revoked = 6
class TAStop(object):
# Stored attributes:
#-----------------------------------------------------------------------------------|
# internal variable | | external name | json | format ( > json) |
#-----------------------------------------------------------------------------------|
station_id = None # station_id | si | 'nl.asd' |
mission_id = None # mission_id | mi | 'nl.2145' |
status = 0 # status | s | 0 |
now = None # now | now | datetime > string |
arrival = None # arrival | a | datetime > string |
departure = None # departure | v | datetime > string |
delay_arr = 0.0 # delay_arr | da | 0.0 |
delay_dep = 0.0 # delay_dep | dv | 0.0 |
destination = None # destination | de | 'Amsterdam' |
alteredDestination = None # alteredDestination | ad | 'Amsterdam' |
platform = None # platform | p | '5b' > '5b' |
platformChange = False # platformChange | pc | True > '5b' |
#-----------------------------------------------------------------------------------|
# ====== Serializing and deserializing ==================================================================
@property
def repr(self):
dictionary = {}
if self.station_id != None: dictionary['si'] = self.station_id
if self.mission_id != None: dictionary['mi'] = self.mission_id
if self.status: dictionary['s'] = self.status
if self.arrival != None: dictionary['a'] = string_from_cet(self.arrival)
if self.departure != None: dictionary['v'] = string_from_cet(self.departure)
if self.now != None: dictionary['now'] = string_from_cet(self.now)
if self.delay_arr: dictionary['da'] = self.delay_arr
if self.delay_dep: dictionary['dv'] = self.delay_dep
if self.destination != None: dictionary['de'] = self.destination
if self.alteredDestination != None: dictionary['ad'] = self.alteredDestination
if self.platformChange:
if self.platform != None: dictionary['pc'] = self.platform
else:
if self.platform != None: dictionary['p'] = self.platform
return dictionary
@classmethod
def fromRepr(cls, dictionary):
self = cls()
self.station_id = dictionary.get('si', None)
self.mission_id = dictionary.get('mi', None)
self.status = dictionary.get('s', 0)
a = dictionary.get('a', None)
if a: self.arrival = cet_from_string(a)
v = dictionary.get('v', None)
if v: self.departure = cet_from_string(v)
now = dictionary.get('now', None)
if now: self.now = cet_from_string(now)
self.delay_arr = dictionary.get('da', 0.0)
self.delay_dep = dictionary.get('dv', 0.0)
self.destination = dictionary.get('de', None)
self.alteredDestination = dictionary.get('ad', None)
self.platform = dictionary.get('p', None)
if self.platform != None:
self.platformChange = False
else:
self.platform = dictionary.get('pc', None)
if self.platform != None:
self.platformChange = True
return self
@property
def station(self):
return ndb.Key('TSStation', self.station_id).get()
@property
def station_url(self):
return '/agent/station/%s' % self.station_id
@property
def station_code(self):
    """Station code: the part of station_id after the country prefix ('nl.asd' -> 'asd')."""
    return self.station_id.split('.')[1]
@property
def number(self):
    """Train number parsed from mission_id ('<country>.<number>')."""
    components = self.mission_id.split('.')
    return int(components[1])
@property
def up(self):
    """1 for odd train numbers (up direction), 0 for even."""
    return 1 if self.number % 2 else 0
@property
def est_arrival(self):
    """Scheduled arrival shifted by the current arrival delay (minutes)."""
    return self.arrival + timedelta(minutes=self.delay_arr)
@property
def arrival_string(self):
    """Arrival as 'HH:MM', or '--' when no arrival is known."""
    return self.arrival.strftime('%H:%M') if self.arrival else '--'
@property
def est_departure(self):
    """Scheduled departure shifted by the current departure delay (minutes)."""
    return self.departure + timedelta(minutes=self.delay_dep)
@property
def departure_string(self):
    """Departure as 'HH:MM', or '--' when no departure is known."""
    return self.departure.strftime('%H:%M') if self.departure else '--'
@property
def real_destination(self):
    """The altered destination when one is set (truthy), else the planned one."""
    return self.alteredDestination or self.destination
@classmethod
def parse_avt(cls, xml_string, delegate):
    """Parse an NS AVT departures XML document for station `delegate`.

    :param xml_string: raw XML string from the NS API
    :param delegate: the station object receiving the parsed stops
    :return: the updated stop objects (the importer's updated_objects values)
    """
    handler = StopsImporter()
    handler.set_up(delegate)
    xml.sax.parseString(xml_string, handler)
    return handler.updated_objects.values()
@classmethod
def revoked_stop(cls, mission_id, station_id):
    """Build a minimal stop that marks `mission_id` as revoked at `station_id`."""
    revoked = cls()
    revoked.mission_id = mission_id
    revoked.station_id = station_id
    revoked.status = StopStatuses.revoked
    return revoked
def forward_to_mission(self, issue_time_cet):
    """
    Creates a task in order to forward the stop to its mission
    :param issue_time_cet: the time (CET) at which the task will be executed
    :return: a taskqueue.Task that can be issued to the taskqueue
    """
    # the label embeds the station code so task names stay unique per station
    label = 'fwd_' + self.station_code
    url = '/TAMission/%s' % self.mission_id
    payload = json.dumps(self.repr)
    logging.info('Forward stop to %s at %s CET' % (self.mission_id, issue_time_cet.strftime('%H:%M:%S')))
    # the task queue expects UTC, so convert the CET issue time
    issue_time = utc_from_cet(issue_time_cet)
    return taskqueue.Task(name=task_name(issue_time, label),
                          url=url,
                          eta=issue_time,
                          payload=payload,
                          headers={'Content-Type': 'application/json'})
# ====== XML Parser ==================================================================
class StopsImporter(XMLImporter):
    """SAX importer that parses an NS 'AVT' departures document into stop
    objects for one station (the delegate).

    NOTE(review): relies on state from the XMLImporter base class --
    presumably self.data, self.changes, self.new_objects, self.updated_objects
    and pop_from_old_objects; confirm against XMLImporter.
    """
    # document-level state
    now = None
    delegate = None                 # station whose departures are being imported
    replaced_mission_codes = None   # 5-digit codes shadowed by 6-digit train numbers
    error = False                   # set when the server responded with <error>
    data = None                     # character buffer (filled by the base class)
    # per-<VertrekkendeTrein> state, reset in start_xml_element
    train_ref = ''
    stop_status = StopStatuses.announced
    departure = ''
    delay = ''
    destination = ''
    alt_destination = None
    platform = ''
    platform_change = False

    def set_up(self, delegate):
        """Attach the station delegate before parsing starts."""
        self.delegate = delegate
        self.replaced_mission_codes = []

    def active_xml_tags(self):
        # one stop object is produced per <VertrekkendeTrein> element
        return ['VertrekkendeTrein']

    def existing_objects_dictionary(self):
        return self.delegate.stops_dictionary

    def key_for_current_object(self):
        # implicitly returns None for non-positive train references
        if int(self.train_ref) > 0:
            return '%s_%s' % (self.train_ref, self.delegate.code)

    def create_new_object(self, key):
        new_stop = TAStop()
        new_stop.station_id = self.delegate.station_id
        new_stop.mission_id = mission_id_from_code(self.train_ref)
        return new_stop

    def start_xml_element(self, name, attrs):
        if name == 'VertrekkendeTrein':
            # reset all per-train fields for the next departure entry
            self.train_ref = ''
            self.stop_status = StopStatuses.announced
            self.departure = ''
            self.delay = ''
            self.destination = ''
            self.alt_destination = None
            self.platform = ''
            self.platform_change = False
        elif name == 'VertrekSpoor':
            # the 'wijziging' attribute flags a platform change
            if attrs['wijziging'].lower() == 'true':
                self.platform_change = True
        elif name == 'error':
            self.error = True

    def end_xml_element(self, name):
        if name == 'RitNummer':
            self.train_ref = ''.join(self.data)
            number = int(self.train_ref)
            if number > 1E5:
                # a 6-digit number replaces an original 5-digit mission;
                # remember the shadowed code so it can be canceled later
                original_code = str(number % 100000)
                self.replaced_mission_codes.append(original_code)
        elif name == 'VertrekTijd':
            self.departure = ''.join(self.data)
        elif name == 'VertrekVertraging':
            self.delay = ''.join(self.data)
        elif name == 'EindBestemming':
            self.destination = ''.join(self.data)
        elif name == 'VertrekSpoor':
            self.platform = ''.join(self.data)
        elif name == 'Opmerking':
            # free-text remarks carry status / destination overrides (Dutch)
            remark = ''.join(self.data).strip()
            words = remark.split()
            if words == ['Niet', 'instappen']:
                logging.info('Niet instappen in trein %s', self.train_ref)
            elif words == ['Extra', 'trein']:
                self.stop_status = StopStatuses.extra
            elif words == ['Rijdt', 'vandaag', 'niet']:
                self.stop_status = StopStatuses.canceled
            elif len(words) > 3 and words[0:3] == ['Rijdt', 'verder', 'naar']:
                self.alt_destination = ' '.join(words[3:])
            elif len(words) > 4 and words[0:4] == ['Rijdt', 'niet', 'verder', 'dan']:
                self.alt_destination = ' '.join(words[4:])
            else:
                logging.warning('Unrecognized remark at %s about %s: %s' %
                                (self.delegate.station_id, self.train_ref, remark))
        elif name == 'error':
            raise NSRespondsWithError()
        elif self.error and name == 'message':
            message = ''.join(self.data).strip()
            logging.warning('While requesting departures from %s, server responds: %s' %
                            (self.delegate.station_id, message))

    def update_object(self, existing_object, name):
        """Copy the parsed per-train fields onto the stop, tracking changes."""
        if existing_object.status != self.stop_status:
            existing_object.status = self.stop_status
            self.changes = True
        delay = minutes_from_RFC3339_string(self.delay)
        if existing_object.delay_dep != delay:
            existing_object.delay_dep = delay
            self.changes = True
        if existing_object.destination != self.destination:
            existing_object.destination = self.destination
            self.changes = True
        if existing_object.alteredDestination != self.alt_destination:
            existing_object.alteredDestination = self.alt_destination
            self.changes = True
        if existing_object.platform != self.platform:
            existing_object.platform = self.platform
            existing_object.platformChange = self.platform_change
            self.changes = True
        departure = cet_from_string(self.departure)
        if departure != existing_object.departure:
            existing_object.departure = departure
            self.changes = True
        # keep track of the latest departure seen in this document
        if not hasattr(self, 'last_departure') or departure > self.last_departure:
            self.last_departure = departure

    def save_objects(self):
        """Finalize the import: cancel shadowed stops, push results to the delegate."""
        for mission_code in self.replaced_mission_codes:
            stop_code = '%s_%s' % (mission_code, self.delegate.code)
            if not self.new_objects.get(stop_code, None):
                # the shadowed mission no longer appears -> mark it canceled
                stop = self.pop_from_old_objects(stop_code)
                if stop:
                    stop.status = StopStatuses.canceled
                    self.updated_objects[stop_code] = stop
                    self.new_objects[stop_code] = stop
        self.delegate.stops_dictionary = self.new_objects
        self.delegate.nr_of_fetches += 1
        if hasattr(self, 'last_departure'):
            self.delegate.last_departure = self.last_departure
        increase_counter('req_api_success')
        self.delegate.cache_set()
class NSRespondsWithError(Exception):
    """Raised when the NS API returns an <error> element instead of departures."""
    pass
# ====== Helpers ==========================================================================
def mission_id_from_code(code):
    """Build a mission id from a numeric train code.

    Codes below 500 are international ('eu'), the rest domestic ('nl').
    """
    country = 'eu' if int(code) < 500 else 'nl'
    return '%s.%s' % (country, code)
def minutes_from_RFC3339_string(string):
    """Parse an ISO 8601 / RFC 3339 duration like 'PT1H30M15S' into minutes.

    Returns a float number of minutes; unparseable input yields 0.0.

    Fixes over the previous version: the old pattern `([0-9]*H)?` could match
    a bare 'H' with no digits and crash on int(''); and an hours-only input
    rebound the result to an int instead of a float.
    """
    duration = 0.0
    # each component now requires at least one digit before its unit letter
    m = re.match(r'PT(?:([0-9]+)H)?(?:([0-9]+)M)?(?:([0-9]+(?:\.[0-9]+)?)S)?', string)
    if m:
        hours, minutes, seconds = m.groups()
        if hours:
            duration += 60 * int(hours)
        if minutes:
            duration += int(minutes)
        if seconds:
            duration += float(seconds) / 60
    return duration
def repr_list_from_stops(stops_list):
    """Map a list of stop objects to their dictionary representations.

    A None input yields an empty list.
    """
    if stops_list is None:
        return []
    return [stop.repr for stop in stops_list]
|
{
"content_hash": "a511f27a8268ae0d149c05324e18e044",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 109,
"avg_line_length": 36.77183098591549,
"alnum_prop": 0.5363873142331852,
"repo_name": "firstflamingo/treinenaapje",
"id": "d7034d7387f4e908ff19c93e44d3c9eee32f1994",
"size": "13775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/TAStop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "285713"
}
],
"symlink_target": ""
}
|
"""Support to trigger Maker IFTTT recipes."""
import json
import logging
import pyfttt
import requests
import voluptuous as vol
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)

# Event fired on the Home Assistant bus for every received webhook payload.
EVENT_RECEIVED = "ifttt_webhook_received"

# Service-call data keys, mirroring the IFTTT Maker webhook fields.
ATTR_EVENT = "event"
ATTR_TARGET = "target"
ATTR_VALUE1 = "value1"
ATTR_VALUE2 = "value2"
ATTR_VALUE3 = "value3"

CONF_KEY = "key"

SERVICE_TRIGGER = "trigger"

SERVICE_TRIGGER_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_EVENT): cv.string,
        vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_VALUE1): cv.string,
        vol.Optional(ATTR_VALUE2): cv.string,
        vol.Optional(ATTR_VALUE3): cv.string,
    }
)

# The key may be a single string or a mapping of target name -> key.
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(DOMAIN): vol.Schema(
            {vol.Required(CONF_KEY): vol.Any({cv.string: cv.string}, cv.string)}
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the IFTTT service component."""
    if DOMAIN not in config:
        return True
    api_keys = config[DOMAIN][CONF_KEY]
    # normalize the single-key config form into a one-entry mapping
    if isinstance(api_keys, str):
        api_keys = {"default": api_keys}

    def trigger_service(call):
        """Handle IFTTT trigger service calls."""
        event = call.data[ATTR_EVENT]
        # default to notifying every configured target
        targets = call.data.get(ATTR_TARGET, list(api_keys))
        value1 = call.data.get(ATTR_VALUE1)
        value2 = call.data.get(ATTR_VALUE2)
        value3 = call.data.get(ATTR_VALUE3)
        # collect keys for known targets; unknown targets are logged and skipped
        target_keys = dict()
        for target in targets:
            if target not in api_keys:
                _LOGGER.error("No IFTTT api key for %s", target)
                continue
            target_keys[target] = api_keys[target]
        try:
            for target, key in target_keys.items():
                res = pyfttt.send_event(key, event, value1, value2, value3)
                if res.status_code != 200:
                    _LOGGER.error("IFTTT reported error sending event to %s.", target)
        except requests.exceptions.RequestException:
            _LOGGER.exception("Error communicating with IFTTT")

    hass.services.async_register(
        DOMAIN, SERVICE_TRIGGER, trigger_service, schema=SERVICE_TRIGGER_SCHEMA
    )
    return True
async def handle_webhook(hass, webhook_id, request):
    """Handle webhook callback: fire the received payload on the event bus."""
    raw = await request.text()
    if raw:
        try:
            payload = json.loads(raw)
        except ValueError:
            # malformed JSON is silently dropped
            return None
    else:
        payload = {}
    # tag dict payloads with the originating webhook id
    if isinstance(payload, dict):
        payload["webhook_id"] = webhook_id
    hass.bus.async_fire(EVENT_RECEIVED, payload)
async def async_setup_entry(hass, entry):
    """Configure based on config entry."""
    # register the entry's webhook id; incoming posts go to handle_webhook
    hass.components.webhook.async_register(
        DOMAIN, "IFTTT", entry.data[CONF_WEBHOOK_ID], handle_webhook
    )
    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # deregister the webhook so its id is no longer routed to this component
    hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
    return True


# pylint: disable=invalid-name
# delegate config-entry removal to the standard webhook cleanup helper
async_remove_entry = config_entry_flow.webhook_async_remove_entry
|
{
"content_hash": "2b073ec262bc21ece08335ad6fbff786",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 86,
"avg_line_length": 27.801724137931036,
"alnum_prop": 0.6446511627906977,
"repo_name": "qedi-r/home-assistant",
"id": "05d773e9fd687a0c36749a268acfc3fda32a2185",
"size": "3225",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ifttt/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from django.utils.encoding import force_unicode
from django.conf import settings
from django import forms
import datetime, time
from django.utils.safestring import mark_safe
from django.core.validators import EMPTY_VALUES
# JS/HTML template for a calendar attached to a text input via a trigger
# button; %-interpolated by CalendarWidget.render().
calbtn = u"""<img src="%(media_url)scalendar/cal.gif" alt="kalendarz" id="%(field_id)s_btn" style="cursor: pointer; float:left;position:relative;top:2px;" title="Kalendarz"
onmouseover="this.style.background='#444444';" onmouseout="this.style.background=''" class="calendar-button"/>
<script type="text/javascript">
Calendar.setup({
inputField : "%(field_id)s",
dateFormat : "%(date_format)s",
trigger : "%(field_id)s_btn",
showTime : false,
titleFormat : "%(title_format)s",
min:%(min_date)s,
max:%(max_date)s
});
</script>"""

# Template for an always-visible (inline) calendar that writes the picked
# date into a hidden input; used by CalendarOnlyWidget.
calbtn_always_visible = u"""<div id="%(field_id)s_container" title="Kalendarz"></div>
<script type="text/javascript">
Calendar.setup({
cont : "%(field_id)s_container",
dateFormat : "%(date_format)s",
showTime : false,
titleFormat : "%(title_format)s",
min:%(min_date)s,
max:%(max_date)s,
onSelect:function(){
var date = this.selection.get();
date = Calendar.intToDate(date);
date = Calendar.printDate(date,"%(date_format)s");
var input = document.getElementById('%(field_id)s');
input.value = date;
}
});
</script>"""
class CalendarWidget(forms.widgets.TextInput):
    """
    @Author: Adam Cupial
    @Date: 12/10/2010
    Django calendar widget, requires js and css files, uses Dynarch Calendar
    Settings
    ------------------
    date_format (default: '%Y-%m-%d'),
    title_format (default: '%b %Y')
    """
    def __init__(self, attrs=None, date_format='%Y-%m-%d', title_format='%b %Y',
                 min_date=None, max_date=None):
        self.date_format = date_format
        self.title_format = title_format
        # min/max are handed to the JS as YYYYMMDD values; 'null' disables them
        self.min_date = min_date.strftime("%Y%m%d") if min_date else 'null'
        self.max_date = max_date.strftime("%Y%m%d") if max_date else 'null'
        super(CalendarWidget, self).__init__(attrs)

    def _media(self):
        """Static assets the widget needs (Dynarch calendar css/js)."""
        return forms.Media(
            css={'all': (settings.MEDIA_URL + 'calendar/css/cal.css',)},
            js=(settings.MEDIA_URL + 'calendar/js/jscal2.js',
                settings.MEDIA_URL + 'calendar/js/lang/pl.js')
        )
    media = property(_media)

    def render(self, name, value, attrs=None):
        """Render the input element plus the calendar-activation script."""
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        if value != '':
            try:
                final_attrs['value'] = force_unicode(value.strftime(self.date_format))
            except (AttributeError, ValueError, TypeError):
                # was a bare `except:`; value may be a raw submitted string
                # without strftime, or strftime may reject the format/date
                final_attrs['value'] = force_unicode(value)
        if 'id' not in final_attrs:  # was has_key(), removed in Python 3
            final_attrs['id'] = u'%s_id' % name
        id = final_attrs['id']
        cal = self.get_js() % {
            'media_url': settings.MEDIA_URL,
            'field_id': id,
            'date_format': self.date_format,
            'title_format': self.title_format,
            'min_date': self.min_date,
            'max_date': self.max_date,
        }
        a = u'<input%s class="calendar-input"/>%s' % (forms.util.flatatt(final_attrs), cal)
        return mark_safe(a)

    def get_js(self):
        """Return the JS template; subclasses override to change presentation."""
        return calbtn

    def value_from_datadict(self, data, files, name):
        """Parse the submitted value into a datetime.

        Tries settings.DATETIME_INPUT_FORMATS in order; returns None when
        the value is empty or nothing parses.
        """
        value = data.get(name, None)
        if value in EMPTY_VALUES:
            return None
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day)
        for format in settings.DATETIME_INPUT_FORMATS:
            try:
                return datetime.datetime(*time.strptime(value, format)[:6])
            except ValueError:
                continue
        return None
class CalendarOnlyWidget(CalendarWidget):
    """Variant that renders an always-visible inline calendar over a hidden input."""
    input_type='hidden'
    is_hidden = True
    def get_js(self):
        # swap in the inline-calendar template instead of the button-triggered one
        return calbtn_always_visible
|
{
"content_hash": "f0d7cfef0999dfaff3cd4b3a135ed706",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 172,
"avg_line_length": 33.798507462686565,
"alnum_prop": 0.5447118569220578,
"repo_name": "adamcupial/django-js-addons",
"id": "3329dae02a9d4b54e507ecd946402e5fff6dc6a2",
"size": "4529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "js_addons/calendar/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22659"
},
{
"name": "Python",
"bytes": "8990"
}
],
"symlink_target": ""
}
|
"""
A mix-in class implementing flow decomposition
"""
from collections import OrderedDict
from copy import copy, deepcopy
from hashlib import md5
import structlog
from voltha.protos import third_party
from voltha.protos import openflow_13_pb2 as ofp
# keep a reference so the third_party protos import is anchored
_ = third_party
log = structlog.get_logger()

# aliases for frequently constructed protobuf messages
ofb_field = ofp.ofp_oxm_ofb_field
action = ofp.ofp_action

# OFPAT_* shortcuts (OpenFlow 1.3 action types)
OUTPUT = ofp.OFPAT_OUTPUT
COPY_TTL_OUT = ofp.OFPAT_COPY_TTL_OUT
COPY_TTL_IN = ofp.OFPAT_COPY_TTL_IN
SET_MPLS_TTL = ofp.OFPAT_SET_MPLS_TTL
DEC_MPLS_TTL = ofp.OFPAT_DEC_MPLS_TTL
PUSH_VLAN = ofp.OFPAT_PUSH_VLAN
POP_VLAN = ofp.OFPAT_POP_VLAN
PUSH_MPLS = ofp.OFPAT_PUSH_MPLS
POP_MPLS = ofp.OFPAT_POP_MPLS
SET_QUEUE = ofp.OFPAT_SET_QUEUE
GROUP = ofp.OFPAT_GROUP
SET_NW_TTL = ofp.OFPAT_SET_NW_TTL
NW_TTL = ofp.OFPAT_DEC_NW_TTL
SET_FIELD = ofp.OFPAT_SET_FIELD
PUSH_PBB = ofp.OFPAT_PUSH_PBB
POP_PBB = ofp.OFPAT_POP_PBB
EXPERIMENTER = ofp.OFPAT_EXPERIMENTER

# OFPXMT_OFB_* shortcuts (incomplete) -- OXM basic match field types
IN_PORT = ofp.OFPXMT_OFB_IN_PORT
IN_PHY_PORT = ofp.OFPXMT_OFB_IN_PHY_PORT
METADATA = ofp.OFPXMT_OFB_METADATA
ETH_DST = ofp.OFPXMT_OFB_ETH_DST
ETH_SRC = ofp.OFPXMT_OFB_ETH_SRC
ETH_TYPE = ofp.OFPXMT_OFB_ETH_TYPE
VLAN_VID = ofp.OFPXMT_OFB_VLAN_VID
VLAN_PCP = ofp.OFPXMT_OFB_VLAN_PCP
IP_DSCP = ofp.OFPXMT_OFB_IP_DSCP
IP_ECN = ofp.OFPXMT_OFB_IP_ECN
IP_PROTO = ofp.OFPXMT_OFB_IP_PROTO
IPV4_SRC = ofp.OFPXMT_OFB_IPV4_SRC
IPV4_DST = ofp.OFPXMT_OFB_IPV4_DST
TCP_SRC = ofp.OFPXMT_OFB_TCP_SRC
TCP_DST = ofp.OFPXMT_OFB_TCP_DST
UDP_SRC = ofp.OFPXMT_OFB_UDP_SRC
UDP_DST = ofp.OFPXMT_OFB_UDP_DST
SCTP_SRC = ofp.OFPXMT_OFB_SCTP_SRC
SCTP_DST = ofp.OFPXMT_OFB_SCTP_DST
ICMPV4_TYPE = ofp.OFPXMT_OFB_ICMPV4_TYPE
ICMPV4_CODE = ofp.OFPXMT_OFB_ICMPV4_CODE
ARP_OP = ofp.OFPXMT_OFB_ARP_OP
ARP_SPA = ofp.OFPXMT_OFB_ARP_SPA
ARP_TPA = ofp.OFPXMT_OFB_ARP_TPA
ARP_SHA = ofp.OFPXMT_OFB_ARP_SHA
ARP_THA = ofp.OFPXMT_OFB_ARP_THA
IPV6_SRC = ofp.OFPXMT_OFB_IPV6_SRC
IPV6_DST = ofp.OFPXMT_OFB_IPV6_DST
IPV6_FLABEL = ofp.OFPXMT_OFB_IPV6_FLABEL
ICMPV6_TYPE = ofp.OFPXMT_OFB_ICMPV6_TYPE
ICMPV6_CODE = ofp.OFPXMT_OFB_ICMPV6_CODE
IPV6_ND_TARGET = ofp.OFPXMT_OFB_IPV6_ND_TARGET
OFB_IPV6_ND_SLL = ofp.OFPXMT_OFB_IPV6_ND_SLL
IPV6_ND_TLL = ofp.OFPXMT_OFB_IPV6_ND_TLL
MPLS_LABEL = ofp.OFPXMT_OFB_MPLS_LABEL
MPLS_TC = ofp.OFPXMT_OFB_MPLS_TC
MPLS_BOS = ofp.OFPXMT_OFB_MPLS_BOS
PBB_ISID = ofp.OFPXMT_OFB_PBB_ISID
TUNNEL_ID = ofp.OFPXMT_OFB_TUNNEL_ID
IPV6_EXTHDR = ofp.OFPXMT_OFB_IPV6_EXTHDR
# ofp_action_* shortcuts
def output(port, max_len=ofp.OFPCML_MAX):
    """OUTPUT action: forward the packet out of `port`."""
    return action(
        type=OUTPUT,
        output=ofp.ofp_action_output(port=port, max_len=max_len)
    )

def mpls_ttl(ttl):
    """SET_MPLS_TTL action."""
    return action(
        type=SET_MPLS_TTL,
        mpls_ttl=ofp.ofp_action_mpls_ttl(mpls_ttl=ttl)
    )

def push_vlan(eth_type):
    """PUSH_VLAN action with the given ethertype."""
    return action(
        type=PUSH_VLAN,
        push=ofp.ofp_action_push(ethertype=eth_type)
    )

def pop_vlan():
    """POP_VLAN action (no payload)."""
    return action(
        type=POP_VLAN
    )

def pop_mpls(eth_type):
    """POP_MPLS action with the given ethertype."""
    return action(
        type=POP_MPLS,
        pop_mpls=ofp.ofp_action_pop_mpls(ethertype=eth_type)
    )

def group(group_id):
    """GROUP action: hand the packet to group `group_id`."""
    return action(
        type=GROUP,
        group=ofp.ofp_action_group(group_id=group_id)
    )

def nw_ttl(nw_ttl):
    """DEC_NW_TTL action (note: NW_TTL aliases OFPAT_DEC_NW_TTL above)."""
    return action(
        type=NW_TTL,
        nw_ttl=ofp.ofp_action_nw_ttl(nw_ttl=nw_ttl)
    )

def set_field(field):
    """SET_FIELD action wrapping an OFB match field."""
    return action(
        type=SET_FIELD,
        set_field=ofp.ofp_action_set_field(
            field=ofp.ofp_oxm_field(
                oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
                ofb_field=field))
    )

def experimenter(experimenter, data):
    """EXPERIMENTER action with opaque data."""
    return action(
        type=EXPERIMENTER,
        experimenter=ofp.ofp_action_experimenter(
            experimenter=experimenter, data=data)
    )
# ofb_field generators (incomplete set)
def in_port(_in_port):
    return ofb_field(type=IN_PORT, port=_in_port)

def in_phy_port(_in_phy_port):
    return ofb_field(type=IN_PHY_PORT, port=_in_phy_port)

def metadata(_table_metadata):
    return ofb_field(type=METADATA, table_metadata=_table_metadata)

def eth_dst(_eth_dst):
    # NOTE(review): stores the value in 'table_metadata' instead of an
    # eth_dst-specific field -- looks like a copy-paste bug; confirm against
    # the ofp_oxm_ofb_field proto before relying on this generator.
    return ofb_field(type=ETH_DST, table_metadata=_eth_dst)

def eth_src(_eth_src):
    # NOTE(review): same suspected copy-paste bug as eth_dst above.
    return ofb_field(type=ETH_SRC, table_metadata=_eth_src)

def eth_type(_eth_type):
    return ofb_field(type=ETH_TYPE, eth_type=_eth_type)

def vlan_vid(_vlan_vid):
    return ofb_field(type=VLAN_VID, vlan_vid=_vlan_vid)

def vlan_pcp(_vlan_pcp):
    return ofb_field(type=VLAN_PCP, vlan_pcp=_vlan_pcp)

def ip_dscp(_ip_dscp):
    return ofb_field(type=IP_DSCP, ip_dscp=_ip_dscp)

def ip_ecn(_ip_ecn):
    return ofb_field(type=IP_ECN, ip_ecn=_ip_ecn)

def ip_proto(_ip_proto):
    return ofb_field(type=IP_PROTO, ip_proto=_ip_proto)

def ipv4_src(_ipv4_src):
    return ofb_field(type=IPV4_SRC, ipv4_src=_ipv4_src)

def ipv4_dst(_ipv4_dst):
    return ofb_field(type=IPV4_DST, ipv4_dst=_ipv4_dst)

def tcp_src(_tcp_src):
    return ofb_field(type=TCP_SRC, tcp_src=_tcp_src)

def tcp_dst(_tcp_dst):
    return ofb_field(type=TCP_DST, tcp_dst=_tcp_dst)

def udp_src(_udp_src):
    return ofb_field(type=UDP_SRC, udp_src=_udp_src)

def udp_dst(_udp_dst):
    return ofb_field(type=UDP_DST, udp_dst=_udp_dst)

def sctp_src(_sctp_src):
    return ofb_field(type=SCTP_SRC, sctp_src=_sctp_src)

def sctp_dst(_sctp_dst):
    return ofb_field(type=SCTP_DST, sctp_dst=_sctp_dst)

def icmpv4_type(_icmpv4_type):
    return ofb_field(type=ICMPV4_TYPE, icmpv4_type=_icmpv4_type)

def icmpv4_code(_icmpv4_code):
    return ofb_field(type=ICMPV4_CODE, icmpv4_code=_icmpv4_code)

def arp_op(_arp_op):
    return ofb_field(type=ARP_OP, arp_op=_arp_op)

def arp_spa(_arp_spa):
    return ofb_field(type=ARP_SPA, arp_spa=_arp_spa)

def arp_tpa(_arp_tpa):
    return ofb_field(type=ARP_TPA, arp_tpa=_arp_tpa)

def arp_sha(_arp_sha):
    return ofb_field(type=ARP_SHA, arp_sha=_arp_sha)

def arp_tha(_arp_tha):
    return ofb_field(type=ARP_THA, arp_tha=_arp_tha)

# NOTE(review): every generator below stores its value in 'arp_tha' while
# setting an unrelated field type -- an apparent copy-paste bug series;
# confirm the intended value field names against the ofp_oxm_ofb_field proto
# before fixing, since existing callers may compare against these messages.
def ipv6_src(_ipv6_src):
    return ofb_field(type=IPV6_SRC, arp_tha=_ipv6_src)

def ipv6_dst(_ipv6_dst):
    return ofb_field(type=IPV6_DST, arp_tha=_ipv6_dst)

def ipv6_flabel(_ipv6_flabel):
    return ofb_field(type=IPV6_FLABEL, arp_tha=_ipv6_flabel)

def ipmpv6_type(_icmpv6_type):
    # NOTE(review): function name is misspelled (should be icmpv6_type);
    # kept as-is because renaming would break callers.
    return ofb_field(type=ICMPV6_TYPE, arp_tha=_icmpv6_type)

def icmpv6_code(_icmpv6_code):
    return ofb_field(type=ICMPV6_CODE, arp_tha=_icmpv6_code)

def ipv6_nd_target(_ipv6_nd_target):
    return ofb_field(type=IPV6_ND_TARGET, arp_tha=_ipv6_nd_target)

def ofb_ipv6_nd_sll(_ofb_ipv6_nd_sll):
    return ofb_field(type=OFB_IPV6_ND_SLL, arp_tha=_ofb_ipv6_nd_sll)

def ipv6_nd_tll(_ipv6_nd_tll):
    return ofb_field(type=IPV6_ND_TLL, arp_tha=_ipv6_nd_tll)

def mpls_label(_mpls_label):
    return ofb_field(type=MPLS_LABEL, arp_tha=_mpls_label)

def mpls_tc(_mpls_tc):
    return ofb_field(type=MPLS_TC, arp_tha=_mpls_tc)

def mpls_bos(_mpls_bos):
    return ofb_field(type=MPLS_BOS, arp_tha=_mpls_bos)

def pbb_isid(_pbb_isid):
    return ofb_field(type=PBB_ISID, arp_tha=_pbb_isid)

def tunnel_id(_tunnel_id):
    return ofb_field(type=TUNNEL_ID, arp_tha=_tunnel_id)

def ipv6_exthdr(_ipv6_exthdr):
    return ofb_field(type=IPV6_EXTHDR, arp_tha=_ipv6_exthdr)
# frequently used extractors:
def get_actions(flow):
    """Extract the list of ofp_action objects from a flow spec object."""
    assert isinstance(flow, ofp.ofp_flow_stats)
    # actions may live in APPLY_ACTIONS or WRITE_ACTIONS instructions
    actions = []
    for instruction in flow.instructions:
        if instruction.type in (ofp.OFPIT_APPLY_ACTIONS, ofp.OFPIT_WRITE_ACTIONS):
            actions.extend(instruction.actions.actions)
    return actions
def get_ofb_fields(flow):
    """Return the OFB match fields of a flow; the match must be OXM-typed."""
    assert isinstance(flow, ofp.ofp_flow_stats)
    assert flow.match.type == ofp.OFPMT_OXM
    fields = []
    for oxm in flow.match.oxm_fields:
        assert oxm.oxm_class == ofp.OFPXMC_OPENFLOW_BASIC
        fields.append(oxm.ofb_field)
    return fields
def get_out_port(flow):
    """Port of the flow's first OUTPUT action, or None."""
    return next((a.output.port for a in get_actions(flow) if a.type == OUTPUT),
                None)

def get_in_port(flow):
    """Value of the flow's first IN_PORT match field, or None."""
    return next((f.port for f in get_ofb_fields(flow) if f.type == IN_PORT),
                None)

def get_goto_table_id(flow):
    """Target table id of the flow's first GOTO_TABLE instruction, or None."""
    return next((i.goto_table.table_id for i in flow.instructions
                 if i.type == ofp.OFPIT_GOTO_TABLE), None)

def get_metadata_from_write_metadata(flow):
    """Metadata value of the flow's first WRITE_METADATA instruction, or None."""
    return next((i.write_metadata.metadata for i in flow.instructions
                 if i.type == ofp.OFPIT_WRITE_METADATA), None)
def get_egress_port_number_from_metadata(flow):
    """Return the egress (uni or nni) port encoded in the write-metadata value.

    Write metadata instruction value (metadata) is 8 bytes:
        MS 2 bytes: C Tag
        Next 2 bytes: Technology Profile Id
        Next 4 bytes: Port number (uni or nni)
    This is set in the ONOS OltPipeline as a write metadata instruction.
    """
    metadata = get_metadata_from_write_metadata(flow)
    log.debug("The metadata for egress port", metadata=metadata)
    if metadata is None:
        return None
    egress_port = metadata & 0xffffffff
    log.debug("Found egress port", egress_port=egress_port)
    return egress_port

def get_tp_id_from_metadata(write_metadata_value):
    """Return the technology profile id (bytes 5-6) of a write-metadata value."""
    return (write_metadata_value >> 32) & 0xffff

def get_inner_tag_from_write_metadata(flow):
    """Return the inner (C) tag encoded in the write-metadata value.

    See get_egress_port_number_from_metadata for the 8-byte layout; the
    inner tag occupies the most significant 2 bytes.
    """
    metadata = get_metadata_from_write_metadata(flow)
    log.debug("The metadata for inner tag", metadata=metadata)
    if metadata is None:
        return None
    inner_tag = (metadata >> 48) & 0xffff
    log.debug("Found inner tag", inner_tag=inner_tag)
    return inner_tag
# test and extract next table and group information
def has_next_table(flow):
    """True when the flow carries a GOTO_TABLE instruction."""
    return get_goto_table_id(flow) is not None

def has_vlan_mod_action(flow):
    """True when the flow applies/writes at least one VLAN-modifying action.

    Counts PUSH_VLAN, POP_VLAN and SET_FIELD; as in the original heuristic,
    SET_FIELD counts regardless of which field it sets.
    Fixed idiom: the previous version materialized a set of all action types
    and tested `len(intersection)` in an if/else returning True/False; this
    short-circuits on the first match and returns the boolean directly.
    """
    vlan_mod_actions = (ofp.OFPAT_PUSH_VLAN, ofp.OFPAT_POP_VLAN,
                        ofp.OFPAT_SET_FIELD)
    for instruction in flow.instructions:
        if instruction.type in (ofp.OFPIT_APPLY_ACTIONS,
                                ofp.OFPIT_WRITE_ACTIONS):
            if any(a.type in vlan_mod_actions
                   for a in instruction.actions.actions):
                return True
    return False

def get_group(flow):
    """Group id of the flow's first GROUP action, or None."""
    for action in get_actions(flow):
        if action.type == GROUP:
            return action.group.group_id
    return None

def get_meter_id_from_flow(flow):
    """Meter id of the flow's first METER instruction, or None."""
    for instruction in flow.instructions:
        if instruction.type == ofp.OFPIT_METER:
            return instruction.meter.meter_id
    return None

def has_group(flow):
    """True when the flow carries a GROUP action."""
    return get_group(flow) is not None
def mk_oxm_fields(match_fields):
    """Wrap plain OFB match fields into OPENFLOW_BASIC oxm_field messages."""
    return [
        ofp.ofp_oxm_field(oxm_class=ofp.OFPXMC_OPENFLOW_BASIC, ofb_field=field)
        for field in match_fields
    ]
def mk_instructions_from_actions(actions):
    """Wrap a list of actions into a single-element APPLY_ACTIONS instruction list."""
    return [ofp.ofp_instruction(
        type=ofp.OFPIT_APPLY_ACTIONS,
        actions=ofp.ofp_instruction_actions(actions=actions))]
def mk_simple_flow_mod(match_fields, actions, command=ofp.OFPFC_ADD,
                       next_table_id=None, meter_id=None, metadata=None, **kw):
    """
    Convenience function to generate ofp_flow_mod message with OXM BASIC match
    composed from the match_fields, and single APPLY_ACTIONS instruction with
    a list of ofp_action objects.
    :param match_fields: list(ofp_oxm_ofb_field)
    :param actions: list(ofp_action)
    :param command: one of OFPFC_*
    :param next_table_id: optional table id for a GOTO_TABLE instruction
    :param meter_id: optional meter id for a METER instruction
    :param metadata: optional value for a WRITE_METADATA instruction
    :param kw: additional keyword-based params to ofp_flow_mod
    :return: initialized ofp_flow_mod object
    """
    instructions = [
        ofp.ofp_instruction(
            type=ofp.OFPIT_APPLY_ACTIONS,
            actions=ofp.ofp_instruction_actions(actions=actions)
        )
    ]
    # optional instructions are appended after the actions instruction
    if meter_id is not None:
        instructions.append(ofp.ofp_instruction(
            type=ofp.OFPIT_METER,
            meter=ofp.ofp_instruction_meter(meter_id=meter_id)
        ))
    if next_table_id is not None:
        instructions.append(ofp.ofp_instruction(
            type=ofp.OFPIT_GOTO_TABLE,
            goto_table=ofp.ofp_instruction_goto_table(table_id=next_table_id)
        ))
    if metadata is not None:
        instructions.append(ofp.ofp_instruction(
            type=ofp.OFPIT_WRITE_METADATA,
            write_metadata=ofp.ofp_instruction_write_metadata(metadata=metadata)
        ))
    return ofp.ofp_flow_mod(
        command=command,
        match=ofp.ofp_match(
            type=ofp.OFPMT_OXM,
            oxm_fields=[
                ofp.ofp_oxm_field(
                    oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
                    ofb_field=field
                ) for field in match_fields
            ]
        ),
        instructions=instructions,
        **kw
    )
def mk_multicast_group_mod(group_id, buckets, command=ofp.OFPGC_ADD):
    """Build an ALL-type (multicast) ofp_group_mod message."""
    return ofp.ofp_group_mod(
        command=command,
        type=ofp.OFPGT_ALL,
        group_id=group_id,
        buckets=buckets
    )
def hash_flow_stats(flow):
    """
    Return unique 64-bit integer hash for flow covering the following
    attributes: 'table_id', 'priority', 'flags', 'cookie', 'match',
    '_instruction_string'
    """
    # concatenate serialized instructions so order and content both count
    # NOTE(review): Python 2 only -- SerializeToString() returns bytes and is
    # concatenated onto a str; would raise under Python 3.
    _instruction_string = ""
    for _instruction in flow.instructions:
        _instruction_string += _instruction.SerializeToString()
    # md5 of the joined attribute string; the first 16 hex digits (64 bits)
    # become the flow id ('hex' shadows the builtin of the same name)
    hex = md5('{},{},{},{},{},{}'.format(
        flow.table_id,
        flow.priority,
        flow.flags,
        flow.cookie,
        flow.match.SerializeToString(),
        _instruction_string
    )).hexdigest()
    return int(hex[:16], 16)
def flow_stats_entry_from_flow_mod_message(mod):
    """Convert an ofp_flow_mod into an ofp_flow_stats entry with a stable id."""
    flow = ofp.ofp_flow_stats(
        table_id=mod.table_id,
        priority=mod.priority,
        idle_timeout=mod.idle_timeout,
        hard_timeout=mod.hard_timeout,
        flags=mod.flags,
        cookie=mod.cookie,
        match=mod.match,
        instructions=mod.instructions
    )
    # the id is a content hash, so identical mods map to the same stats entry
    flow.id = hash_flow_stats(flow)
    return flow
def group_entry_from_group_mod(mod):
    """Convert an ofp_group_mod into an ofp_group_entry (desc + empty stats)."""
    group = ofp.ofp_group_entry(
        desc=ofp.ofp_group_desc(
            type=mod.type,
            group_id=mod.group_id,
            buckets=mod.buckets
        ),
        stats=ofp.ofp_group_stats(
            group_id=mod.group_id
            # TODO do we need to instantiate bucket bins?
        )
    )
    return group
def meter_entry_from_meter_mod(mod):
    """Convert an ofp_meter_mod into an ofp_meter_entry with zeroed stats."""
    meter = ofp.ofp_meter_entry(
        config=ofp.ofp_meter_config(
            flags=mod.flags,
            meter_id=mod.meter_id,
            bands=mod.bands
        ),
        # stats start at zero; one band_stats entry is created per band
        stats=ofp.ofp_meter_stats(
            meter_id=mod.meter_id,
            flow_count=0,
            packet_in_count=0,
            byte_in_count=0,
            duration_sec=0,
            duration_nsec=0,
            band_stats=[ofp.ofp_meter_band_stats(
                packet_band_count=0,
                byte_band_count=0
            ) for _ in range(len(mod.bands))]
        )
    )
    return meter
def mk_flow_stat(**kw):
    """Shorthand: build a flow mod from kw and convert it to a stats entry."""
    return flow_stats_entry_from_flow_mod_message(mk_simple_flow_mod(**kw))

def mk_group_stat(**kw):
    """Shorthand: build a multicast group mod from kw and convert it to an entry."""
    return group_entry_from_group_mod(mk_multicast_group_mod(**kw))
class RouteHop(object):
    """One hop of a logical-device route: a device plus its ingress and
    egress ports. Value object; equality compares all three attributes."""
    __slots__ = ('_device', '_ingress_port', '_egress_port')
    def __init__(self, device, ingress_port, egress_port):
        self._device = device
        self._ingress_port = ingress_port
        self._egress_port = egress_port
    @property
    def device(self): return self._device
    @property
    def ingress_port(self): return self._ingress_port
    @property
    def egress_port(self): return self._egress_port
    def __eq__(self, other):
        return (
            self._device == other._device and
            self._ingress_port == other._ingress_port and
            self._egress_port == other._egress_port)
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly
        return not self.__eq__(other)
    def __str__(self):
        return 'RouteHop device_id {}, ingress_port {}, egress_port {}'.format(
            self._device.id, self._ingress_port, self._egress_port)
class FlowDecomposer(object):
    """Mixin that decomposes logical-device flows/groups into per-device rules.

    A derived class must supply routing and default-rule lookups (see the
    NotImplementedError stubs at the bottom). `decompose_flow` also reads
    `self._nni_logical_port_no`, which is presumably set by the derived
    class -- TODO confirm against the concrete implementations.
    """

    def __init__(self, *args, **kw):
        self.logical_device_id = 'this shall be overwritten in derived class'
        super(FlowDecomposer, self).__init__(*args, **kw)

    # ~~~~~~~~~~~~~~~~~~~~ methods exposed *to* derived class ~~~~~~~~~~~~~~~~~

    def decompose_rules(self, flows, groups):
        """
        Generate per-device flows and flow-groups from the flows and groups
        defined on a logical device
        :param flows: logical device flows
        :param groups: logical device flow groups
        :return: dict(device_id ->
            (OrderedDict-of-device-flows, OrderedDict-of-device-flow-groups))
        """
        # Start from a deep copy of the default rules so devices with no
        # matching logical flow still keep their baseline entries intact.
        device_rules = deepcopy(self.get_all_default_rules())
        group_map = dict((g.desc.group_id, g) for g in groups)

        for flow in flows:
            for device_id, (_flows, _groups) \
                    in self.decompose_flow(flow, group_map).iteritems():
                fl_lst, gr_lst = device_rules.setdefault(
                    device_id, (OrderedDict(), OrderedDict()))
                # De-duplicate by flow id / group id while preserving order.
                for _flow in _flows:
                    if _flow.id not in fl_lst:
                        fl_lst[_flow.id] = _flow
                for _group in _groups:
                    if _group.group_id not in gr_lst:
                        gr_lst[_group.group_id] = _group
        return device_rules

    def decompose_flow(self, flow, group_map):
        """Decompose one logical flow into per-device flow lists.

        :param flow: an ofp_flow_stats message describing the logical flow
        :param group_map: dict of group_id -> ofp_group_entry
        :return: dict(device_id -> (list-of-flows, list-of-groups))
        """
        assert isinstance(flow, ofp.ofp_flow_stats)

        ####################################################################
        #
        # limited, heuristics based implementation
        # needs to be replaced, see https://jira.opencord.org/browse/CORD-841
        #
        ####################################################################

        in_port_no = get_in_port(flow)
        out_port_no = get_out_port(flow)  # may be None

        device_rules = {}  # accumulator

        route = self.get_route(in_port_no, out_port_no)
        if route is None:
            # Without a route the flow cannot be mapped onto any device;
            # drop it from the logical device as well.
            log.error('no-route', in_port_no=in_port_no,
                      out_port_no=out_port_no, comment='deleting flow')
            self.flow_delete(flow)
            return device_rules

        # The heuristic below assumes a two-hop route (e.g. OLT + ONU).
        assert len(route) == 2
        ingress_hop, egress_hop = route

        def is_downstream():
            # Downstream traffic enters at the root device of the tree.
            return ingress_hop.device.root

        def is_upstream():
            return not is_downstream()

        meter_id = get_meter_id_from_flow(flow)
        metadata_from_write_metadata = get_metadata_from_write_metadata(flow)

        # first identify trap flows for packets from UNI or NNI ports
        if out_port_no is not None and \
                (out_port_no & 0x7fffffff) == ofp.OFPP_CONTROLLER:

            # CONTROLLER-BOUND FLOW
            # TODO: support in-band control as an option
            if in_port_no == self._nni_logical_port_no:
                # TODO handle multiple NNI ports
                log.debug('decomposing-trap-flow-from-nni', match=flow.match)
                # no decomposition required - it is already an OLT flow from NNI
                fl_lst, _ = device_rules.setdefault(
                    ingress_hop.device.id, ([], []))
                fl_lst.append(flow)
            else:
                log.debug('decomposing-trap-flow-from-uni', match=flow.match)
                # we assume that the ingress device is already pushing a
                # customer-specific vlan (c-vid) or default vlan id
                # so there is nothing else to do on the ONU
                # XXX is this a correct assumption?
                fl_lst, _ = device_rules.setdefault(
                    egress_hop.device.id, ([], []))
                # wildcarded input port matching is not handled
                if in_port_no is None:
                    log.error('wildcarded-input-not-handled', flow=flow,
                              comment='deleting flow')
                    self.flow_delete(flow)
                    return device_rules
                # need to map the input UNI port to the corresponding PON port
                fl_lst.append(mk_flow_stat(
                    priority=flow.priority,
                    cookie=flow.cookie,
                    match_fields=[
                        in_port(egress_hop.ingress_port.port_no)
                    ] + [
                        field for field in get_ofb_fields(flow)
                        if field.type not in (IN_PORT,)
                    ],
                    actions=[action for action in get_actions(flow)],
                    meter_id=meter_id,
                    metadata=metadata_from_write_metadata
                ))
        else:
            # NOT A CONTROLLER-BOUND FLOW
            # we assume that the controller has already ensured the right
            # actions for cases where
            # a) vlans are pushed or popped at onu and olt
            # b) C-vlans are transparently forwarded
            if is_upstream():
                if flow.table_id == 0 and has_next_table(flow):
                    # This is an ONU flow in upstream direction
                    assert out_port_no is None
                    log.debug('decomposing-onu-flow-in-upstream', match=flow.match)
                    fl_lst, _ = device_rules.setdefault(
                        ingress_hop.device.id, ([], []))
                    fl_lst.append(mk_flow_stat(
                        priority=flow.priority,
                        cookie=flow.cookie,
                        match_fields=[
                            in_port(ingress_hop.ingress_port.port_no)
                        ] + [
                            field for field in get_ofb_fields(flow)
                            if field.type not in (IN_PORT,)
                        ],
                        actions=[
                            action for action in get_actions(flow)
                        ] + [
                            output(ingress_hop.egress_port.port_no)
                        ],
                        meter_id=meter_id,
                        metadata=metadata_from_write_metadata
                    ))
                elif flow.table_id == 0 and not has_next_table(flow) and \
                        out_port_no is None:
                    # This is an ONU drop flow for untagged packets at the UNI
                    log.debug('decomposing-onu-drop-flow-upstream', match=flow.match)
                    fl_lst, _ = device_rules.setdefault(
                        ingress_hop.device.id, ([], []))
                    fl_lst.append(mk_flow_stat(
                        priority=flow.priority,
                        cookie=flow.cookie,
                        match_fields=[
                            in_port(ingress_hop.ingress_port.port_no)
                        ] + [
                            vlan_vid(0)  # OFPVID_NONE indicating untagged
                        ],
                        actions=[]  # no action is drop
                    ))
                elif flow.table_id == 1 and out_port_no is not None:
                    # This is OLT flow in upstream direction
                    log.debug('decomposing-olt-flow-in-upstream', match=flow.match)
                    fl_lst, _ = device_rules.setdefault(
                        egress_hop.device.id, ([], []))
                    fl_lst.append(mk_flow_stat(
                        priority=flow.priority,
                        cookie=flow.cookie,
                        match_fields=[
                            in_port(egress_hop.ingress_port.port_no),
                        ] + [
                            field for field in get_ofb_fields(flow)
                            if field.type not in (IN_PORT, )
                        ],
                        actions=[
                            action for action in get_actions(flow)
                            if action.type != OUTPUT
                        ] + [
                            output(egress_hop.egress_port.port_no)
                        ],
                        meter_id=meter_id,
                        metadata=metadata_from_write_metadata
                    ))
                elif flow.table_id == 0 and out_port_no is not None \
                        and not has_vlan_mod_action(flow):
                    # Transparent upstream flow
                    log.debug('decomposing-transparent-olt-flow-in-upstream', match=flow.match)
                    fl_lst, _ = device_rules.setdefault(
                        egress_hop.device.id, ([], []))
                    fl_lst.append(mk_flow_stat(
                        priority=flow.priority,
                        cookie=flow.cookie,
                        match_fields=[
                            in_port(egress_hop.ingress_port.port_no),
                        ] + [
                            field for field in get_ofb_fields(flow)
                            if field.type not in (IN_PORT, )
                        ],
                        actions=[
                            action for action in get_actions(flow)
                            if action.type != OUTPUT
                        ] + [
                            output(egress_hop.egress_port.port_no)
                        ],
                        meter_id=meter_id,
                        metadata=metadata_from_write_metadata
                    ))
                else:
                    # unknown upstream flow
                    log.error('unknown-upstream-flow', flow=flow,
                              comment='deleting flow')
                    self.flow_delete(flow)
                    return device_rules
            else:  # downstream
                if flow.table_id == 0 and has_next_table(flow):
                    # OLT flow in downstream direction (unicast traffic)
                    assert out_port_no is None
                    log.debug('decomposing-olt-flow-in-downstream', match=flow.match)
                    # For downstream flows without output port action we need to
                    # recalculate route with the output extracted from the metadata
                    # to determine the PON port to send to the correct ONU/UNI
                    egress_port_number = get_egress_port_number_from_metadata(flow)
                    if egress_port_number is not None:
                        route = self.get_route(in_port_no, egress_port_number)
                        if route is None:
                            log.error('no-route-downstream', in_port_no=in_port_no,
                                      egress_port_number=egress_port_number, comment='deleting flow')
                            self.flow_delete(flow)
                            return device_rules
                        assert len(route) == 2
                        # Re-bind the hops to the recalculated route.
                        ingress_hop, egress_hop = route
                    fl_lst, _ = device_rules.setdefault(
                        ingress_hop.device.id, ([], []))
                    fl_lst.append(mk_flow_stat(
                        priority=flow.priority,
                        cookie=flow.cookie,
                        match_fields=[
                            in_port(ingress_hop.ingress_port.port_no)
                        ] + [
                            field for field in get_ofb_fields(flow)
                            if field.type not in (IN_PORT,)
                        ],
                        actions=[
                            action for action in get_actions(flow)
                        ] + [
                            output(ingress_hop.egress_port.port_no)
                        ],
                        meter_id=meter_id,
                        metadata=metadata_from_write_metadata
                    ))
                elif flow.table_id == 0 and out_port_no is not None \
                        and not has_vlan_mod_action(flow):
                    # Transparent downstream flow
                    log.debug('decomposing-transparent-olt-flow-in-downstream', match=flow.match)
                    # For downstream flows without output port action we need to
                    # recalculate route with the output extracted from the metadata
                    # to determine the PON port to send to the correct ONU/UNI
                    egress_port_number = get_egress_port_number_from_metadata(flow)
                    if egress_port_number is not None:
                        route = self.get_route(in_port_no, egress_port_number)
                        if route is None:
                            log.error('no-route-downstream', in_port_no=in_port_no,
                                      egress_port_number=egress_port_number, comment='deleting flow')
                            self.flow_delete(flow)
                            return device_rules
                        assert len(route) == 2
                        ingress_hop, egress_hop = route
                    fl_lst, _ = device_rules.setdefault(
                        ingress_hop.device.id, ([], []))
                    fl_lst.append(mk_flow_stat(
                        priority=flow.priority,
                        cookie=flow.cookie,
                        match_fields=[
                            in_port(ingress_hop.ingress_port.port_no)
                        ] + [
                            field for field in get_ofb_fields(flow)
                            if field.type not in (IN_PORT,)
                        ],
                        actions=[
                            action for action in get_actions(flow)
                        ] + [
                            output(ingress_hop.egress_port.port_no)
                        ],
                        meter_id=meter_id,
                        metadata=metadata_from_write_metadata
                    ))
                elif flow.table_id == 1 and out_port_no is not None:
                    # ONU flow in downstream direction (unicast traffic)
                    log.debug('decomposing-onu-flow-in-downstream', match=flow.match)
                    fl_lst, _ = device_rules.setdefault(
                        egress_hop.device.id, ([], []))
                    fl_lst.append(mk_flow_stat(
                        priority=flow.priority,
                        cookie=flow.cookie,
                        match_fields=[
                            in_port(egress_hop.ingress_port.port_no)
                        ] + [
                            field for field in get_ofb_fields(flow)
                            if field.type not in (IN_PORT,)
                        ],
                        actions=[
                            action for action in get_actions(flow)
                            if action.type not in (OUTPUT,)
                        ] + [
                            output(egress_hop.egress_port.port_no)
                        ],
                        meter_id=meter_id,
                        metadata=metadata_from_write_metadata
                    ))
                elif flow.table_id == 0 and has_group(flow):
                    # Multicast Flow
                    log.debug('decomposing-multicast-flow')
                    grp_id = get_group(flow)
                    fl_lst_olt, _ = device_rules.setdefault(
                        ingress_hop.device.id, ([], []))

                    # having no group yet is the same as having a group with
                    # no buckets
                    group = group_map.get(grp_id, ofp.ofp_group_entry())

                    for bucket in group.desc.buckets:
                        found_pop_vlan = False
                        other_actions = []
                        for action in bucket.actions:
                            if action.type == POP_VLAN:
                                found_pop_vlan = True
                            elif action.type == OUTPUT:
                                # NOTE: rebinds the outer out_port_no to this
                                # bucket's output port for the route lookup.
                                out_port_no = action.output.port
                            else:
                                other_actions.append(action)
                        # re-run route request to determine egress device and
                        # ports
                        route2 = self.get_route(in_port_no, out_port_no)
                        if not route2 or len(route2) != 2:
                            log.error('mc-no-route', in_port_no=in_port_no,
                                      out_port_no=out_port_no, route2=route2,
                                      comment='deleting flow')
                            self.flow_delete(flow)
                            continue

                        ingress_hop2, egress_hop = route2
                        if ingress_hop.ingress_port != ingress_hop2.ingress_port:
                            log.error('mc-ingress-hop-hop2-mismatch',
                                      ingress_hop=ingress_hop,
                                      ingress_hop2=ingress_hop2,
                                      in_port_no=in_port_no,
                                      out_port_no=out_port_no,
                                      comment='ignoring flow')
                            continue

                        fl_lst_olt.append(mk_flow_stat(
                            priority=flow.priority,
                            cookie=flow.cookie,
                            match_fields=[
                                in_port(ingress_hop.ingress_port.port_no)
                            ] + [
                                field for field in get_ofb_fields(flow)
                                if field.type not in (IN_PORT,)
                            ],
                            actions=[
                                action for action in get_actions(flow)
                                if action.type not in (GROUP,)
                            ] + [
                                pop_vlan(),
                                output(egress_hop.ingress_port.port_no)
                            ]
                        ))

                        fl_lst_onu, _ = device_rules.setdefault(
                            egress_hop.device.id, ([], []))
                        fl_lst_onu.append(mk_flow_stat(
                            priority=flow.priority,
                            cookie=flow.cookie,
                            match_fields=[
                                in_port(egress_hop.ingress_port.port_no)
                            ] + [
                                field for field in get_ofb_fields(flow)
                                if field.type not in (IN_PORT, VLAN_VID, VLAN_PCP)
                            ],
                            actions=other_actions + [
                                output(egress_hop.egress_port.port_no)
                            ]
                        ))
                else:
                    log.error('unknown-downstream-flow', flow=flow,
                              comment='deleting flow')
                    self.flow_delete(flow)

        return device_rules

    # ~~~~~~~~~~~~ methods expected to be provided by derived class ~~~~~~~~~~~

    def get_all_default_rules(self):
        """Return default rules for all devices (device_id -> rules)."""
        raise NotImplementedError('derived class must provide')

    def get_default_rules(self, device_id):
        """Return default rules for a single device."""
        raise NotImplementedError('derived class must provide')

    def get_route(self, ingress_port_no, egress_port_no):
        """Return the RouteHop list between the two ports (or None)."""
        raise NotImplementedError('derived class must provide')

    def get_wildcard_input_ports(self, exclude_port=None):
        """Return the ports a wildcard input match should expand to."""
        raise NotImplementedError('derived class must provide')

    def flow_delete(self, mod):
        """Delete a flow from the logical device."""
        raise NotImplementedError('derived class must provide')
|
{
"content_hash": "b3a3924bd0d828bd76e184f518d36367",
"timestamp": "",
"source": "github",
"line_count": 995,
"max_line_length": 102,
"avg_line_length": 36.64723618090452,
"alnum_prop": 0.5369953927161035,
"repo_name": "opencord/voltha",
"id": "8027beeee765abe71072a6cff648947eb3b0199b",
"size": "37065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voltha/core/flow_decomposer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "30265"
},
{
"name": "Dockerfile",
"bytes": "2881"
},
{
"name": "Go",
"bytes": "181529"
},
{
"name": "Jinja",
"bytes": "25855"
},
{
"name": "Makefile",
"bytes": "76329"
},
{
"name": "Python",
"bytes": "9758796"
},
{
"name": "RobotFramework",
"bytes": "10188"
},
{
"name": "Ruby",
"bytes": "1126"
},
{
"name": "Shell",
"bytes": "758475"
},
{
"name": "XSLT",
"bytes": "175917"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from typing import Dict, Type
from .base import DashboardsServiceTransport
from .grpc import DashboardsServiceGrpcTransport
from .grpc_asyncio import DashboardsServiceGrpcAsyncIOTransport
# Registry mapping a transport name to its implementation class.
_transport_registry = OrderedDict(
    [
        ("grpc", DashboardsServiceGrpcTransport),
        ("grpc_asyncio", DashboardsServiceGrpcAsyncIOTransport),
    ]
)  # type: Dict[str, Type[DashboardsServiceTransport]]

__all__ = (
    "DashboardsServiceTransport",
    "DashboardsServiceGrpcTransport",
    "DashboardsServiceGrpcAsyncIOTransport",
)
|
{
"content_hash": "09b500695e102a5a9e23ab1714b0c152",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 32.73684210526316,
"alnum_prop": 0.815112540192926,
"repo_name": "googleapis/python-monitoring-dashboards",
"id": "7057d44554888f0dd84a43156ae7bfce453827b9",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/monitoring/dashboard_v1/services/dashboards_service/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "452940"
},
{
"name": "Shell",
"bytes": "30705"
}
],
"symlink_target": ""
}
|
from pyspark import tests
from pyspark.sql import SparkSession
from examples import depSemisupervisedMnist
from pyspark.sql import functions as F
import math
class Test_Semisupervised_Mnist(tests.ReusedPySparkTestCase):
    """Integration tests for depSemisupervisedMnist on an MNIST csv dump."""

    def setUp(self):
        # NOTE(review): hard-coded local path -- these tests can only run on a
        # machine where this file exists.
        str_input = '/home/svanhmic/workspace/data/DABAI/mnist/train.csv'
        self.spark = SparkSession(sparkContext=self.sc)
        self.data_frame = self.spark.read.csv(
            path=str_input, header=True, inferSchema=True,
            mode='PERMISSIVE', nullValue=float('NAN'), nanValue=float('NAN'))

    def test_create_nan_labels(self):
        """create_nan_labels must add a missing_<label> column with ~fraction NaNs."""
        fraction = 0.1
        input_data_frame = self.data_frame.filter(F.col('label').isin([0, 1]))
        output_data_frame = depSemisupervisedMnist.create_nan_labels(
            self.sc, dataframe=input_data_frame, label_col='label', fraction=fraction)

        # TEST 1: Does it contain missing_*label_name*?
        self.assertIn(member='missing_label', container=output_data_frame.columns)

        # TEST 2: Does the missing_factor correspond to the actual amount of missings?
        computed_fractions = (output_data_frame.filter(~F.isnan('missing_label'))
                              .groupBy('missing_label').count().rdd.collectAsMap())
        desired_frac = input_data_frame.groupBy('label').count().collect()
        desired_fractions = dict(map(lambda x: (x['label'], fraction*x['count']), desired_frac))
        for key, val in computed_fractions.items():
            # allow 1 percent deviation
            self.assertAlmostEqual(val, desired_fractions[key], delta=input_data_frame.count()*0.01)

    def test_enlarge_dataset(self):
        """enlarge_dataset must resample to (approximately) the requested size."""
        original_size = 1000
        input_df = self.data_frame.limit(original_size)

        # Test 1: reduce the size to 90%
        new_size = 900
        output_data_frame = depSemisupervisedMnist.enlarge_dataset(
            dataframe=input_df, size=new_size, feature_cols=['pixel'+str(i) for i in range(784)])
        self.assertAlmostEqual(output_data_frame.count(), new_size, delta=original_size*0.05)

        # Test 2: enlargen to double size
        new_size = 2000
        output_data_frame = depSemisupervisedMnist.enlarge_dataset(
            dataframe=input_df, size=new_size, feature_cols=['pixel'+str(i) for i in range(784)])
        self.assertAlmostEqual(output_data_frame.count(), new_size, delta=new_size*0.05)

    def test_subset_dataset_by_label(self):
        """subset_dataset_by_label must keep only the requested labels."""
        # Test 1:
        output_data_frame = depSemisupervisedMnist.subset_dataset_by_label(
            self.sc, self.data_frame, 'label', 0, 1, 2)
        distinct_label = output_data_frame.select('label').distinct().collect()
        for val in map(lambda x: x['label'], distinct_label):
            self.assertIn(val, [0, 1, 2])

    def test_compute_fraction(self):
        """_compute_fraction must honor constant and per-label fractions."""
        # TEST 1: Check for constant fractions
        frac = 0.1
        computed_dict = depSemisupervisedMnist._compute_fraction(
            sc=self.sc, dataframe=self.data_frame, fraction=frac,
            label_col='label')
        self.assertListEqual(list1=list(range(10)), list2=list(computed_dict.keys()))
        for key, val in computed_dict.items():
            self.assertEqual(val, 0.1)

        # TEST 2: Check for variable fractions
        actual_fractions = dict(zip(map(lambda x: str(x), range(10)),
                                    [0.1, 0.2, 0.3, 0.4, 0.4, 0.5, 0.8, 0.9, 0.01, 0.002]))
        try:
            variable_dict = depSemisupervisedMnist._compute_fraction(
                sc=self.sc, dataframe=self.data_frame, label_col='label', **actual_fractions)
        except TypeError as te:
            # Fail loudly: the previous code swallowed the TypeError and then
            # crashed with NameError on the unbound `variable_dict` below.
            print(te)
            print(actual_fractions)
            raise
        # TODO(review): comparing variable_dict[key] to itself is vacuous; this
        # probably should assert against actual_fractions[str(key)] -- confirm
        # intent before changing the expectation.
        for key, val in variable_dict.items():
            self.assertEqual(first=val, second=variable_dict[key])
|
{
"content_hash": "ed2d04481deae65f7d5d928e288e7bef",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 125,
"avg_line_length": 45.33734939759036,
"alnum_prop": 0.6486845601913367,
"repo_name": "mssalvador/WorkflowCleaning",
"id": "3cb7d7fa5109d283cf34b1d5d7ed78a0fe290c35",
"size": "3763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/examples/test_semisupervised_mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1053440"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "179722"
}
],
"symlink_target": ""
}
|
from threading import Lock
class AutocompleteContext(object):
    '''
    An autocomplete context.

    Intended to be shared as a single instance and accessed as a context
    manager, which serializes access through an internal lock:

        with ctx as c:
            c.id = ...

    Note: nothing here enforces single-instance construction; callers are
    expected to share one module-level object.
    '''

    def __init__(self):
        self.lock = Lock()
        self._is_open = False
        self._id = None
        self._request_id = None
        self._results = []
        self._formatted_results = []

    def __enter__(self):
        self.lock.acquire()
        self._is_open = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUGFIX: mark the context closed *before* releasing the lock. The
        # previous order released first, so another thread could acquire the
        # lock and set _is_open = True, only for this thread to clobber it
        # back to False and break the other thread's open context.
        self._is_open = False
        self.lock.release()

    @property
    def id(self):
        assert self._is_open, 'must open context first -- use as a context manager'
        return self._id

    @id.setter
    def id(self, value):
        assert self._is_open, 'must open context first -- use as a context manager'
        self._id = value

    @property
    def request_id(self):
        assert self._is_open, 'must open context first -- use as a context manager'
        return self._request_id

    @request_id.setter
    def request_id(self, value):
        assert self._is_open, 'must open context first -- use as a context manager'
        self._request_id = value

    @property
    def results(self):
        assert self._is_open, 'must open context first -- use as a context manager'
        return self._results

    @results.setter
    def results(self, value):
        assert self._is_open, 'must open context first -- use as a context manager'
        self._results = value

    @property
    def formatted_results(self):
        # Deliberately unguarded: readable without opening the context.
        return self._formatted_results

    @formatted_results.setter
    def formatted_results(self, value):
        self._formatted_results = value

    def invalidate(self):
        """Clear results and forget the current completion/request ids."""
        assert self._is_open, 'must open context first -- use as a context manager'
        self.invalidate_results()
        self._request_id = None
        self._id = None

    def invalidate_results(self):
        """Clear both raw and formatted completion results."""
        assert self._is_open, 'must open context first -- use as a context manager'
        self._results = []
        self._formatted_results = []
|
{
"content_hash": "1905a903b7bba9b266d03a9246d0c191",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 83,
"avg_line_length": 27.11842105263158,
"alnum_prop": 0.5992236778262979,
"repo_name": "guillermooo/dart-sublime-bundle-releases",
"id": "3c1f80eeb1188fb62ad21be020aa47d0a90e5c38",
"size": "2061",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/autocomplete.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10564"
},
{
"name": "PowerShell",
"bytes": "464"
},
{
"name": "Python",
"bytes": "713856"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
}
|
"""Contrastive loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compiler.tf2xla.python import xla # pylint: disable=g-direct-tensorflow-import
# Command-line flags (support_size, fn_topk, fn_threshold, train_batch_size,
# memory_multiplier) are read lazily inside the loss functions below.
FLAGS = flags.FLAGS

# Large constant used to push masked logits (e.g. self-similarities)
# effectively to -inf before softmax / logsumexp.
LARGE_NUM = 1e9
def add_supervised_loss(labels, logits, weights, **kwargs):
  """Compute loss for model and add it to loss collection."""
  loss = tf.losses.softmax_cross_entropy(labels, logits, weights, **kwargs)
  return loss
def add_contrastive_loss(hidden,
                         hidden_norm=True,
                         temperature=1.0,
                         tpu_context=None,
                         weights=1.0):
  """Compute the instance discrimination loss for the model.

  Args:
    hidden: hidden vector (`Tensor`) of shape (bsz, dim).
    hidden_norm: whether or not to use normalization on the hidden vector.
    temperature: a `floating` number for temperature scaling.
    tpu_context: context information for tpu.
    weights: a weighting number or vector.

  Returns:
    A loss scalar.
  """
  # Get (normalized) hidden1 and hidden2.
  if hidden_norm:
    hidden = tf.math.l2_normalize(hidden, -1)
  # `hidden` stacks 2 + FLAGS.support_size views along the batch dimension.
  splitted_list = tf.split(hidden, 2 + FLAGS.support_size, 0)
  batch_size = tf.shape(splitted_list[0])[0]
  splitted_list_expanded = [tf.expand_dims(elem, 1) for elem in splitted_list]
  splitted_list_concat = tf.concat(splitted_list_expanded, 1)
  if tpu_context is not None:
    # Gather all replicas' hiddens so negatives span the global batch; `masks`
    # marks this replica's own samples within the gathered batch.
    splitted_list_large = [
        tpu_cross_replica_concat(elem, tpu_context) for elem in splitted_list
    ]
    batch_size_large = tf.shape(splitted_list_large[0])[0]
    replica_id = tf.cast(tf.cast(xla.replica_id(), tf.uint32), tf.int32)
    sample_idx = tf.range(batch_size) + replica_id * batch_size
    masks = tf.one_hot(sample_idx, batch_size_large)
  else:
    splitted_list_large = splitted_list
    batch_size_large = batch_size
    masks = tf.one_hot(tf.range(batch_size), batch_size)
  masks_all = tf.tile(masks, [1, len(splitted_list)])
  splitted_list_large_concat = tf.concat(splitted_list_large, 0)
  # Max similarity of each anchor (over its views) to every candidate, with
  # the sample's own views masked out before false-negative selection.
  all_scores = tf.matmul(
      splitted_list_concat, splitted_list_large_concat, transpose_b=True)
  all_scores = tf.reduce_max(all_scores, axis=1)
  all_scores = all_scores - masks_all * LARGE_NUM
  # Topk strategy.
  topk = FLAGS.fn_topk
  _, topk_inds = tf.math.top_k(all_scores, k=topk)
  # Generates mask with elements greater than the threshold.
  threshold = FLAGS.fn_threshold
  dense_mask = tf.math.greater(all_scores, threshold)
  dense_mask = tf.cast(dense_mask, tf.float32)
  # Creates a binary mask with top k elements being 1.0.
  topk_inds_expanded = tf.expand_dims(topk_inds, -1)
  batch_ind = tf.tile(
      tf.reshape(tf.range(batch_size), [-1, 1, 1]), [1, topk, 1])
  taken_ind = tf.reshape(
      tf.concat([batch_ind, topk_inds_expanded], -1), [-1, 2])
  taken_ind = tf.cast(taken_ind, tf.int64)
  sparse_mask = tf.sparse.SparseTensor(
      taken_ind, tf.ones([tf.shape(taken_ind)[0]]),
      [batch_size, tf.shape(splitted_list_large_concat)[0]])
  dense_mask_topk = tf.sparse.to_dense(sparse_mask, validate_indices=False)
  # Combines two masks: candidates must be both top-k and above threshold.
  dense_mask = dense_mask*dense_mask_topk
  # Computes loss.
  total_loss = 0.0
  for anchor_ind in range(len(splitted_list)):
    view_list = []
    log_numerator = 0
    for view_ind in range(len(splitted_list)):
      anchor_view = tf.matmul(
          splitted_list[anchor_ind],
          splitted_list_large[view_ind],
          transpose_b=True) / temperature
      if view_ind == anchor_ind:
        # Exclude self-similarity from the denominator.
        anchor_view = anchor_view - masks*LARGE_NUM
      else:
        # Other views of the same sample are the positives.
        log_numerator += tf.reduce_sum(anchor_view * masks, axis=1)
      view_list.append(anchor_view)
    denominator = tf.concat(view_list, 1)
    # Selected false negatives are treated as additional positives.
    log_numerator += tf.reduce_sum(denominator * dense_mask, axis=1)
    log_numerator = -log_numerator / (
        len(splitted_list) - 1.0 + tf.reduce_sum(dense_mask, axis=1))
    log_denominator = tf.math.reduce_logsumexp(denominator, axis=1)
    cur_loss = tf.reduce_mean(log_numerator + log_denominator)
    total_loss = total_loss + cur_loss
  total_loss = total_loss*2.0/(len(splitted_list))
  total_loss = total_loss*weights
  tf.losses.add_loss(total_loss)
  return total_loss
def add_contrastive_loss_with_memory(hidden,
                                     memory,
                                     hidden_ema,
                                     hidden_norm=True,
                                     temperature=1.0,
                                     weights=1.0):
  """Compute the instance discrimination loss for the model.

  Args:
    hidden: hidden vector (`Tensor`) of shape (bsz, dim).
    memory: the memory bank.
    hidden_ema: samples from the momentum encoder of the current batch.
    hidden_norm: whether or not to use normalization on the hidden vector.
    temperature: a `floating` number for temperature scaling.
    weights: a weighting number or vector.

  Returns:
    A loss scalar.
  """
  # Get (normalized) hidden1 and hidden2.
  if hidden_norm:
    hidden = tf.math.l2_normalize(hidden, -1)
  # `hidden` stacks 2 + FLAGS.support_size views along the batch dimension.
  splitted_list = tf.split(hidden, 2 + FLAGS.support_size, 0)
  batch_size = tf.shape(splitted_list[0])[0]
  batch_size_large = FLAGS.train_batch_size
  replica_id = tf.cast(tf.cast(xla.replica_id(), tf.uint32), tf.int32)
  sample_idx = tf.range(batch_size) + replica_id * batch_size
  masks = tf.one_hot(sample_idx, batch_size_large)
  masks_all = tf.tile(masks, [1, len(splitted_list)])
  # Pad the mask to the full memory width: only the leading
  # len(splitted_list) * batch_size_large memory slots hold current views.
  masks_all_mem = tf.concat([
      masks_all,
      tf.zeros([
          batch_size, batch_size_large *
          (FLAGS.memory_multiplier - len(splitted_list))
      ])
  ], -1)
  # Computes the support set from the memory bank.
  splitted_list_mem = tf.split(hidden_ema, 2 + FLAGS.support_size, 0)
  splitted_list_mem_expanded = [
      tf.expand_dims(elem, 1) for elem in splitted_list_mem
  ]
  splitted_list_mem_concat = tf.concat(splitted_list_mem_expanded, 1)
  # Computes similairity scores.
  all_scores = tf.matmul(splitted_list_mem_concat, memory, transpose_b=True)
  all_scores = tf.reduce_max(all_scores, axis=1)  # batch x mem
  all_scores = all_scores - masks_all_mem * LARGE_NUM
  # Topk strategy.
  topk = FLAGS.fn_topk
  _, topk_inds = tf.math.top_k(all_scores, k=topk)
  # Generates mask with elements greater than the threshold.
  threshold = FLAGS.fn_threshold
  dense_mask = tf.math.greater(all_scores, threshold)
  dense_mask = tf.cast(dense_mask, tf.float32)
  # Creates a binary mask with top k elements being 1.0.
  topk_inds_expanded = tf.expand_dims(topk_inds, -1)
  batch_ind = tf.tile(
      tf.reshape(tf.range(batch_size), [-1, 1, 1]), [1, topk, 1])
  taken_ind = tf.reshape(
      tf.concat([batch_ind, topk_inds_expanded], -1), [-1, 2])
  taken_ind = tf.cast(taken_ind, tf.int64)
  sparse_mask = tf.sparse.SparseTensor(
      taken_ind, tf.ones([tf.shape(taken_ind)[0]]),
      [batch_size, tf.shape(memory)[0]])
  dense_mask_topk = tf.sparse.to_dense(sparse_mask, validate_indices=False)
  # Combines two masks: candidates must be both top-k and above threshold.
  dense_mask = dense_mask*dense_mask_topk
  total_loss = 0.0
  for anchor_ind in range(len(splitted_list)):
    # Builds the mask for the current anchor.
    if anchor_ind == 0:
      mask_anchor = tf.concat([
          masks,
          tf.zeros(
              [batch_size, batch_size_large * (FLAGS.memory_multiplier - 1)])
      ], axis=-1)
    else:
      # Zero-pad on the left up to this anchor's slot, then on the right.
      mask_anchor = tf.concat(
          [tf.zeros([batch_size, batch_size_large * anchor_ind]), masks],
          axis=-1)
      mask_anchor = tf.concat([
          mask_anchor,
          tf.zeros([
              batch_size, batch_size_large *
              (FLAGS.memory_multiplier - 1 - anchor_ind)
          ])
      ], axis=-1)
    anchor_mem = tf.matmul(
        splitted_list[anchor_ind], memory, transpose_b=True) / temperature
    # Positives: the sample's *other* views stored in the memory bank.
    log_numerator = tf.reduce_sum(
        anchor_mem * (masks_all_mem * (1. - mask_anchor)), axis=1)
    # Exclude the anchor's own slot from the denominator.
    denominator = anchor_mem - mask_anchor * LARGE_NUM
    log_numerator += tf.reduce_sum(denominator * dense_mask, axis=1)
    log_numerator = -log_numerator / (
        len(splitted_list) - 1.0 + tf.reduce_sum(dense_mask, axis=1))
    log_denominator = tf.math.reduce_logsumexp(denominator, axis=1)
    cur_loss = tf.reduce_mean(log_numerator + log_denominator)
    total_loss = total_loss + cur_loss
  total_loss = total_loss*2.0/(len(splitted_list))
  total_loss = total_loss*weights
  tf.losses.add_loss(total_loss)
  return total_loss
def tpu_cross_replica_concat(tensor, tpu_context=None):
  """Reduce a concatenation of the `tensor` across TPU cores.

  Args:
    tensor: tensor to concatenate.
    tpu_context: A `TPUContext`. If not set, CPU execution is assumed.

  Returns:
    Tensor of the same rank as `tensor` with first dimension `num_replicas`
    times larger.
  """
  # Single-replica (or CPU) execution: nothing to gather.
  if tpu_context is None or tpu_context.num_replicas <= 1:
    return tensor

  num_replicas = tpu_context.num_replicas

  with tf.name_scope('tpu_cross_replica_concat'):
    # This creates a tensor that is like the input tensor but has an added
    # replica dimension as the outermost dimension. On each replica it will
    # contain the local values and zeros for all other values that need to be
    # fetched from other replicas.
    ext_tensor = tf.scatter_nd(
        indices=[[xla.replica_id()]],
        updates=[tensor],
        shape=[num_replicas] + tensor.shape.as_list())

    # As every value is only present on one replica and 0 in all others, adding
    # them all together will result in the full tensor on all replicas.
    ext_tensor = tf.tpu.cross_replica_sum(ext_tensor)

    # Flatten the replica dimension.
    # The first dimension size will be: tensor.shape[0] * num_replicas
    # Using [-1] trick to support also scalar input.
    return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
|
{
"content_hash": "2a080740f0cf66c85d64fd78798dbb35",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 95,
"avg_line_length": 36.733333333333334,
"alnum_prop": 0.652651744303287,
"repo_name": "google-research/fnc",
"id": "6ae270a1e477d86945d98b0f07479f48b7088a30",
"size": "10595",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "objective.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Component that will help set the FFmpeg component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/ffmpeg/
"""
import logging
import re
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.dispatcher import (
async_dispatcher_send, async_dispatcher_connect)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['ha-ffmpeg==1.11']

DOMAIN = 'ffmpeg'

_LOGGER = logging.getLogger(__name__)

# Services offered by this component (start/stop/restart ffmpeg processes).
SERVICE_START = 'start'
SERVICE_STOP = 'stop'
SERVICE_RESTART = 'restart'

# Dispatcher signals the services fan out to; FFmpeg entities subscribe.
SIGNAL_FFMPEG_START = 'ffmpeg.start'
SIGNAL_FFMPEG_STOP = 'ffmpeg.stop'
SIGNAL_FFMPEG_RESTART = 'ffmpeg.restart'

# Key under which the FFmpegManager instance is stored in hass.data.
DATA_FFMPEG = 'ffmpeg'

# Configuration keys used by this component and its platforms.
CONF_INITIAL_STATE = 'initial_state'
CONF_INPUT = 'input'
CONF_FFMPEG_BIN = 'ffmpeg_bin'
CONF_EXTRA_ARGUMENTS = 'extra_arguments'
CONF_OUTPUT = 'output'

DEFAULT_BINARY = 'ffmpeg'

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_FFMPEG_BIN, default=DEFAULT_BINARY): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)

# Schema shared by all three services: an optional list of entity ids.
SERVICE_FFMPEG_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
async def async_setup(hass, config):
    """Set up the FFmpeg component."""
    conf = config.get(DOMAIN, {})

    manager = FFmpegManager(
        hass, conf.get(CONF_FFMPEG_BIN, DEFAULT_BINARY))

    await manager.async_get_version()

    # Register service
    async def async_service_handle(service):
        """Handle service ffmpeg process."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)

        # Pick the dispatcher signal matching the invoked service.
        if service.service == SERVICE_START:
            signal = SIGNAL_FFMPEG_START
        elif service.service == SERVICE_STOP:
            signal = SIGNAL_FFMPEG_STOP
        else:
            signal = SIGNAL_FFMPEG_RESTART
        async_dispatcher_send(hass, signal, entity_ids)

    # All three services share one handler and one schema.
    for service_name in (SERVICE_START, SERVICE_STOP, SERVICE_RESTART):
        hass.services.async_register(
            DOMAIN, service_name, async_service_handle,
            schema=SERVICE_FFMPEG_SCHEMA)

    hass.data[DATA_FFMPEG] = manager
    return True
class FFmpegManager:
    """Helper for ha-ffmpeg."""

    def __init__(self, hass, ffmpeg_bin):
        """Initialize helper."""
        self.hass = hass
        self._cache = {}
        self._bin = ffmpeg_bin
        self._version = None
        self._major_version = None

    @property
    def binary(self):
        """Return ffmpeg binary from config."""
        return self._bin

    async def async_get_version(self):
        """Return ffmpeg version."""
        from haffmpeg.tools import FFVersion

        ffversion = FFVersion(self._bin, self.hass.loop)
        self._version = await ffversion.get_version()

        # Extract the leading major number, e.g. "4" from "4.1.3".
        self._major_version = None
        if self._version is not None:
            match = re.search(r"(\d+)\.", self._version)
            if match is not None:
                self._major_version = int(match.group(1))

        return self._version, self._major_version

    @property
    def ffmpeg_stream_content_type(self):
        """Return HTTP content type for ffmpeg stream."""
        # ffmpeg <= 3 (or unknown version) uses the legacy ffserver boundary.
        if self._major_version is None or self._major_version <= 3:
            return 'multipart/x-mixed-replace;boundary=ffserver'
        return 'multipart/x-mixed-replace;boundary=ffmpeg'
class FFmpegBase(Entity):
    """Interface object for FFmpeg."""

    def __init__(self, initial_state=True):
        """Initialize ffmpeg base object."""
        self.ffmpeg = None
        # Whether the ffmpeg process should be started on HA startup.
        self.initial_state = initial_state

    async def async_added_to_hass(self):
        """Register dispatcher & events.

        This method is a coroutine.
        """
        async_dispatcher_connect(
            self.hass, SIGNAL_FFMPEG_START, self._async_start_ffmpeg)
        async_dispatcher_connect(
            self.hass, SIGNAL_FFMPEG_STOP, self._async_stop_ffmpeg)
        async_dispatcher_connect(
            self.hass, SIGNAL_FFMPEG_RESTART, self._async_restart_ffmpeg)

        # register start/stop
        self._async_register_events()

    @property
    def available(self):
        """Return True if entity is available."""
        return self.ffmpeg.is_running

    @property
    def should_poll(self):
        """Return True if entity has to be polled for state."""
        return False

    async def _async_start_ffmpeg(self, entity_ids):
        """Start a FFmpeg process.

        This method is a coroutine.
        """
        raise NotImplementedError()

    async def _async_stop_ffmpeg(self, entity_ids):
        """Stop a FFmpeg process.

        This method is a coroutine.
        """
        # A None entity_ids means the signal targets every FFmpeg entity.
        if entity_ids is None or self.entity_id in entity_ids:
            await self.ffmpeg.close()

    async def _async_restart_ffmpeg(self, entity_ids):
        """Restart a FFmpeg process by stopping and starting it.

        This method is a coroutine.
        """
        if entity_ids is None or self.entity_id in entity_ids:
            await self._async_stop_ffmpeg(None)
            await self._async_start_ffmpeg(None)

    @callback
    def _async_register_events(self):
        """Register a FFmpeg process/device."""
        async def async_shutdown_handle(event):
            """Stop FFmpeg process."""
            await self._async_stop_ffmpeg(None)

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP, async_shutdown_handle)

        # start on startup
        if not self.initial_state:
            return

        async def async_start_handle(event):
            """Start FFmpeg process."""
            await self._async_start_ffmpeg(None)
            self.async_schedule_update_ha_state()

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, async_start_handle)
|
{
"content_hash": "0254252d1b26a986b311c82f291ef357",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 75,
"avg_line_length": 29.018957345971565,
"alnum_prop": 0.6393924546790789,
"repo_name": "PetePriority/home-assistant",
"id": "3184b5a5d54ab31fc9dc6ca899ffdc992d1b3793",
"size": "6123",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ffmpeg/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
"""
Django settings for ecoloscore project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*****'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'corsheaders',
'users',
'ecoauth',
'checkpoints',
'coffeecups',
'score',
'contract',
'common',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ecoloscore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecoloscore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '*****',
'USER': '*****',
'PASSWORD': '*****',
'HOST': '127.0.0.1',
'PORT': 3306,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = '/ABSOLUTE/PATH/TO/media/'
MEDIA_URL = '/media/'
# rest framework configuration
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
}
# cors headers
CORS_ORIGIN_ALLOW_ALL = True
|
{
"content_hash": "e863b4240f60616fa7405e58b8e4d2f6",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 71,
"avg_line_length": 24.74264705882353,
"alnum_prop": 0.6692421991084695,
"repo_name": "J1bz/ecoloscore",
"id": "65452c3ed4b079f05c634b88c9e30d5541148283",
"size": "3390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecoloscore/settings_DIST.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "59276"
}
],
"symlink_target": ""
}
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class LinksCommand(ModuleData, Command):
    """Core implementation of the IRC /LINKS command."""
    implements(IPlugin, IModuleData, ICommand)

    name = "LinksCommand"
    core = True

    def userCommands(self):
        return [ ("LINKS", 1, self) ]

    def parseParams(self, user, params, prefix, tags):
        # /LINKS takes no parameters we care about
        return {}

    def execute(self, user, data):
        # First line: this server, linked to itself at distance 0.
        user.sendMessage(irc.RPL_LINKS, self.ircd.name, self.ircd.name, "0 {}".format(self.ircd.config["server_description"]))
        for remote in self.ircd.servers.itervalues():
            # Walk the nextClosest chain back to us to count the hops.
            hops = 1
            cursor = remote.nextClosest
            while cursor != self.ircd.serverID:
                cursor = self.ircd.servers[cursor].nextClosest
                hops += 1
            closest = self.ircd.name if remote.nextClosest == self.ircd.serverID else self.ircd.servers[remote.nextClosest].name
            user.sendMessage(irc.RPL_LINKS, remote.name, closest, "{} {}".format(hops, remote.description))
        user.sendMessage(irc.RPL_ENDOFLINKS, "*", "End of /LINKS list.")
        return True

linksCmd = LinksCommand()
|
{
"content_hash": "ba5cceffa7a82c4c61de1d5fbccdcf5d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 120,
"avg_line_length": 35.05882352941177,
"alnum_prop": 0.7374161073825504,
"repo_name": "ElementalAlchemist/txircd",
"id": "8410362d9c3c2da4a6d13fe3b8ec787520f30611",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txircd/modules/rfc/cmd_links.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "492365"
}
],
"symlink_target": ""
}
|
import time
import quick2wire.i2c as i2c
import re
import logging
from threading import Lock
from RPi import GPIO
# Support library for MCP23017 on Raspberry pi
log = logging.getLogger("MCP23017")
# Shared I2C master used by every chip/port object in this module.
BUS = i2c.I2CMaster()
GPIO.setmode(GPIO.BCM)
# Register Mapping for Bank SETTING mode
# key 0: IOCON.BANK = 0 layout; key 1: IOCON.BANK = 1 layout
REGISTER_MAPPING = {
    0 : {
        'IODIR': 0X00,
        'IPOL': 0X02,
        'GPINTEN': 0X04,
        'DEFVAL': 0X06,
        'INTCON': 0X08,
        'IOCON': 0X0A,
        'GPPU': 0X0C,
        'INTF': 0X0E,
        'INTCAP': 0X10,
        'GPIO': 0X12,
        'OLAT': 0X14
    },
    1: {
        'IODIR': 0X00,
        'IPOL': 0X01,
        'GPINTEN': 0X02,
        'DEFVAL': 0X03,
        'INTCON': 0X04,
        'IOCON': 0X05,
        'GPPU': 0X06,
        'INTF': 0X07,
        'INTCAP': 0X08,
        'GPIO': 0X09,
        'OLAT': 0X0A
    }
}
# mapping of bits inside icocon register
# Fix: added the missing SEQOP bit (bit 5, 0b00100000) --
# MCP23017.enable_toggle_mode looks up IOCON['SEQOP'] and previously
# raised KeyError because the entry did not exist.
IOCON = {'BANK':0b10000000, 'MIRROR': 0b01000000, 'SEQOP': 0b00100000, 'DISSLW': 0b00010000, 'HAEN': 0b00001000, 'ODR': 0b00000100, 'INTPOL': 0b00000010}
MODE_INPUT = 0
MODE_OUTPUT = 1
MODE_PULLUP_HIGH = 1
MODE_PULLUP_DISABLE = 0
MODE_INVERT = 1
MODE_NOINVERT = 0
class PortManager:
    """Manage one 8-bit port (A or B) of an MCP23017 expander.

    Keeps the last observed GPIO snapshot, dispatches RPi edge interrupts
    to a user callback, and offers Arduino-style pin helpers that delegate
    register access to the parent MCP23017 instance.
    """

    state = 0  # last GPIO snapshot of this port
    external_callback = None  # user callback: (changes, prefix, address)
    parent = None  # owning MCP23017 instance
    PREFIX = None  # register prefix selecting port A or B
    accuracy = 0  # accuracy tells how many callbacks have been executed till gpio goes back to zero
    accuracy_callback = None  # callback to execute if accuracy reporting is wanted

    ## Valid prefix values:
    # bank = 0 : 0, 1
    # bank = 1 : 0x00, 0x10
    def __init__(self, mcp, prefix, interrupt_pin, register_resolver = None):
        """Set up the port and configure the RPi interrupt pin as input."""
        if register_resolver is not None:
            # allow callers to override how PREFIX and register are combined
            self._resolve_register = register_resolver
        log.debug("Initialize port 0x{0:x}".format(prefix))
        self.lock = Lock()
        self.PREFIX = prefix
        self.interrupt_pin = interrupt_pin
        self.parent = mcp
        log.debug("Initialize Pulldown for GPIO pin "+ str(self.interrupt_pin))
        GPIO.setup(self.interrupt_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

    def set_callback(self, callback):
        """Install the user callback; arm RPi edge detection on first call."""
        log.debug("Set callback "+str(callback))
        self.state = self.parent.read(self._resolve_register(self.parent.REGISTER['GPIO']))
        log.debug("Re-Setting initial state of port is now 0b{0:b}".format(self.state))
        if self.external_callback is None:
            log.debug("first call of set_callback: enabling RPi interrupt")
            GPIO.add_event_detect(self.interrupt_pin, GPIO.RISING, callback = self.callback)
        self.external_callback = callback

    def callback(self, channel):
        """Handle an RPi interrupt: read INTF/INTCAP/GPIO, diff, notify."""
        log.debug("Interrupt detected on address 0x{0:x} with prefix 0x{1:x}; channel {2}".format(self.parent.ADDRESS, self.PREFIX, channel))
        # Fix: hold the lock via a with-block so a failing I2C transaction
        # can no longer leave it permanently acquired.
        with self.lock:
            log.debug("Lock aquired!")
            log.debug("Before State is 0b{0:b}".format(self.state))
            erg = BUS.transaction(
                #READ INTF TO FIND OUT INITIATING PIN
                i2c.writing_bytes(self.parent.ADDRESS,self._resolve_register(self.parent.REGISTER['INTF'])),
                i2c.reading(self.parent.ADDRESS,1),
                #READ INTCAP TO GET CURRENTLY ACTIVATED PINS | RESETS THE INTERRUPT
                i2c.writing_bytes(self.parent.ADDRESS,self._resolve_register(self.parent.REGISTER['INTCAP'])),
                i2c.reading(self.parent.ADDRESS,1),
                #READ GPIO TO GET CURRENTLY ACTIVATED PINS | RESETS THE INTERRUPT
                i2c.writing_bytes(self.parent.ADDRESS,self._resolve_register(self.parent.REGISTER['GPIO'])),
                i2c.reading(self.parent.ADDRESS,1),
            )
            intf = erg[0][0]
            log.debug("INTF was 0b{0:b}".format(intf))
            intcap = erg[1][0]
            log.debug("INTCAP was 0b{0:b}".format(intcap))
            gpio = erg[2][0]
            log.debug("GPIO was 0b{0:b}".format(gpio))
            current = intf | gpio
            #calculate only changes (bits that rose since the last snapshot)
            changes = (self.state ^ 0b11111111) & current
            #set new state
            self.state = gpio
            log.debug("After State is 0b{0:b}".format(self.state))
        log.debug("Lock released!")
        #call callback after lock release
        log.debug("Sending changes 0b{0:b} to callback method".format(changes))
        self.external_callback(changes, self.PREFIX, self.parent.ADDRESS)
        if self.accuracy_callback:
            self.accuracy += 1
            if (self.state == 0):
                self.accuracy_callback(self.accuracy)
                self.accuracy = 0

    ##########################
    #Arduino-Lib like methods
    ##########################
    def _resolve_register(self, register):
        """Map a bank-relative register offset to this port's address."""
        if self.parent.BANK == 0:
            return self.PREFIX + register
        elif self.parent.BANK == 1:
            return self.PREFIX | register

    def _high_level_setter_single_pin(self, pin, mode, register):
        """Set (mode truthy) or clear one bit of ``register``.

        Fix: the original referenced the undefined global ``parent`` and
        every caller passed an extra explicit ``self`` argument (TypeError).
        """
        config = 1 << pin
        if mode == 0:
            self.parent.unset_register(register, config)
        else:
            self.parent.set_register(register, config)

    def pin_mode(self, mode, pin=None):
        """Configure I/O direction (IODIR) for all pins, or one pin if given.

        Fix: the original defined ``pin_mode`` twice, so the single-pin
        variant was silently shadowed by the whole-port one.  Merged into one
        backward-compatible method (``pin=None`` keeps the old behaviour).
        """
        register = self._resolve_register(self.parent.REGISTER['IODIR'])
        if pin is None:
            self.parent.write(register, mode)
        else:
            self._high_level_setter_single_pin(pin, mode, register)

    def pullup_mode(self, mode, pin=None):
        """Configure pull-ups (GPPU) for all pins, or one pin if given."""
        register = self._resolve_register(self.parent.REGISTER['GPPU'])
        if pin is None:
            self.parent.write(register, mode)
        else:
            self._high_level_setter_single_pin(pin, mode, register)

    def input_invert(self, mode, pin=None):
        """Configure input polarity inversion (IPOL) for all pins or one."""
        register = self._resolve_register(self.parent.REGISTER['IPOL'])
        if pin is None:
            self.parent.write(register, mode)
        else:
            self._high_level_setter_single_pin(pin, mode, register)

    ####################
    # Interrupts
    ####################
    def interrupt_enable(self, mode, pin=None):
        """Enable interrupt-on-change (GPINTEN) for all pins or one pin."""
        register = self._resolve_register(self.parent.REGISTER['GPINTEN'])
        if pin is None:
            self.parent.write(register, mode)
        else:
            self._high_level_setter_single_pin(pin, mode, register)

    #set interrupt on compare for all pins - set DEFVAL accordingly
    def interrupt_compare(self, mode):
        self.parent.write(self._resolve_register(self.parent.REGISTER['INTCON']), mode)

    #reference value (DEFVAL) used by interrupt-on-compare
    def interrupt_compare_value(self, mode):
        self.parent.write(self._resolve_register(self.parent.REGISTER['DEFVAL']), mode)

    ######################
    # Reading and Writing
    ######################
    def digital_write(self, mode, pin=None):
        """Write the output latch (OLAT) for all pins, or one pin if given."""
        register = self._resolve_register(self.parent.REGISTER['OLAT'])
        if pin is None:
            self.parent.write(register, mode)
        else:
            self._high_level_setter_single_pin(pin, mode, register)

    def digital_read(self, pin=None):
        """Read GPIO; return the whole port, or one bit if ``pin`` is given."""
        value = self.parent.read(self._resolve_register(self.parent.REGISTER['GPIO']))
        if pin is None:
            return value
        return (value >> pin) & 0b00000001
#################################
#
# Class modeling a MCP23017 chip
#
#################################
class MCP23017(object):
    """Driver for one MCP23017 16-bit I/O expander on the shared I2C bus."""

    ADDRESS = None  # 7-bit I2C address of the chip
    BANK = None  # currently selected IOCON.BANK register layout (0 or 1)
    TOGGLE_MODE = False  # when True, read()/write() move 16 bits (both ports)

    @property
    def REGISTER(self):
        """Register offsets valid for the currently selected bank layout."""
        return REGISTER_MAPPING[self.BANK]

    def __init__(self, address, bank = None, toggle_mode = False):
        log.info("Initialize MCP23017 on 0x{0:x}".format(address))
        self.ADDRESS = address
        if bank is not None:  # fix: identity test instead of `not bank == None`
            self.bank_mode(bank)
        if toggle_mode:
            self.enable_toggle_mode()

    def bank_mode(self, bank):
        """Select the register layout (IOCON.BANK) and remember it."""
        self.BANK = bank
        log.info("Bank set to {0:d}".format(bank))
        #EVERYTHING else goes to zero - some magic to write bit on both settings
        if self.BANK == 1: #assume has been bank=0 before
            BUS.transaction(
                i2c.writing_bytes(self.ADDRESS,0x15, IOCON['BANK']),
                i2c.writing_bytes(self.ADDRESS,0x0A, IOCON['BANK']))
        elif self.BANK == 0:
            BUS.transaction(
                i2c.writing_bytes(self.ADDRESS,0x15, 0 ),
                i2c.writing_bytes(self.ADDRESS,0x0A, 0 ))

    # go to byte mode where address pointer toggles between port pair
    def enable_toggle_mode(self):
        self.bank_mode(0)
        log.info("going to byte Mode")
        # Fix: the original looked up IOCON['SEQOP'], a key absent from the
        # module-level IOCON dict, and raised KeyError.  Use the SEQOP bit
        # (bit 5) directly so this method works on its own.
        self.set_config(0b00100000)  # IOCON.SEQOP
        self.TOGGLE_MODE = True

    ################
    # Port generation
    # this essentially generates ports depending on your
    # - bank config
    # - given parameters
    ######################
    def generate_ports(self, interrupt_s):
        """Create PortManager objects for this chip.

        A dict {'A': pin, 'B': pin} yields one manager per 8-bit port; a
        string selects the (unsupported) 16-bit configuration.
        """
        # fix: `ports` was only created in the dict branch, so the elif
        # branch raised NameError when it tried to assign ports[0]
        ports = {}
        if isinstance(interrupt_s, dict): # 8-bit mode configuration
            ports[str(self.ADDRESS)+'_A'] = PortManager(self, 0, interrupt_s['A'])
            ports[str(self.ADDRESS)+'_B'] = PortManager(self, 0x10 if self.BANK else 1, interrupt_s['B'])
        elif isinstance(interrupt_s, str): # 16-bit configuration - NOT SUPPORTED
            # fix: original tested the undefined name `x` against the
            # Python-2-only `basestring`
            self.enable_toggle_mode()
            self.set_config(IOCON['MIRROR'])
            ports[0] = PortManager(self,0,interrupt_s)
        return ports

    # to comfortably set and unset chip config
    def set_config(self, config):
        """Set the given bit(s) in the IOCON register."""
        log.info("Access IOCON, adding: 0b{0:b}".format(config))
        self.set_register(self.REGISTER['IOCON'],config)

    def unset_config(self, config):
        """Clear the given bit(s) in the IOCON register."""
        log.info("Access IOCON, removing: 0b{0:b}".format(config))
        self.unset_register(self.REGISTER['IOCON'],config)

    # Support bitwise setting and unsetting of register values
    def set_register(self, register, config):
        """Read-modify-write: OR ``config`` into ``register``."""
        log.debug("Register 0x{0:x} adding: 0b{1:b}".format(register, config))
        register_value = self.read(register)
        log.debug("Register before 0b{0:b}".format(register_value))
        self.write(register, register_value | config)
        log.debug("Register after 0b{0:b}".format(register_value | config))

    def unset_register(self, register, config):
        """Read-modify-write: clear the ``config`` bits of ``register``."""
        log.debug("Register 0x{0:x}, removing: 0b{1:b}".format(register, config))
        register_value = self.read(register)
        log.debug("Register before 0b{0:b}".format(register_value))
        self.write(register, register_value & (config ^ 0b11111111))
        log.debug("Register after 0b{0:b}".format(register_value & (config ^ 0b11111111)))

    # read and write to specific register - either 8 bit and 16 bit mode are supported implicit by TOGGLE flag
    def read(self, register):
        byte = BUS.transaction(
            i2c.writing_bytes(self.ADDRESS, register),
            i2c.reading(self.ADDRESS, 2 if self.TOGGLE_MODE else 1))
        if self.TOGGLE_MODE:
            # 16-bit mode: second byte is the high-order port
            data = (byte[0][1] << 8) | byte[0][0]
        else:
            data = byte[0][0]
        log.debug("Reading from address {0:#4X} register 0x{1:#4X} value {2:#10b}".format(self.ADDRESS, register, data))
        return data

    def write(self, register, value):
        log.debug("Writing to address {0:#4X} register 0x{1:#4X} value {2:#10b}".format(self.ADDRESS, register, value))
        if self.TOGGLE_MODE:
            # split 16-bit value into low/high bytes for the port pair
            a = value & 0b11111111
            b = (value >> 8) & 0b11111111
            BUS.transaction(
                i2c.writing_bytes(self.ADDRESS, register, a, b),
            )
        else:
            BUS.transaction(
                i2c.writing_bytes(self.ADDRESS, register ,value),
            )
#Reads the chip without setting bank mode
if __name__ == "__main__":
    import sys
    logging.basicConfig()
    logging.getLogger("MCP23017").setLevel(logging.DEBUG)
    chip = MCP23017(int(sys.argv[1]))
    # walk every register up to 0x1A; values are emitted via debug logging
    for register in range(0x1B):
        chip.read(register)
|
{
"content_hash": "aa0a03532dfa0c3143c9ea9124fcad53",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 137,
"avg_line_length": 32.9536231884058,
"alnum_prop": 0.6527399067640074,
"repo_name": "computerlyrik/MCP23017-RPi-python",
"id": "6a2ea81c4a5fdc622aece9722244aa878d50a51e",
"size": "11388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MCP23017/MCP23017.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11277"
}
],
"symlink_target": ""
}
|
# Sphinx build configuration for the vod-metadata documentation.
# Executed by Sphinx at build time; only module-level variables are read.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'vod-metadata'
copyright = u'2017, Bo Bayles'
author = u'Bo Bayles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2017.2.25'
# The full version, including alpha/beta/rc tags.
release = u'2017.2.25'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'vod-metadatadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'vod-metadata.tex', u'vod-metadata Documentation',
     u'Bo Bayles', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'vod-metadata', u'vod-metadata Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the description below still carries the sphinx-quickstart
# placeholder text ("One line description of project.").
texinfo_documents = [
    (master_doc, 'vod-metadata', u'vod-metadata Documentation',
     author, 'vod-metadata', 'One line description of project.',
     'Miscellaneous'),
]
|
{
"content_hash": "17837325c67874eee2617ee28bd19e4a",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 78,
"avg_line_length": 30.735537190082646,
"alnum_prop": 0.6652325894057542,
"repo_name": "bbayles/vod_metadata",
"id": "d4f0ff06ba722233176e72cd6160450e0eb83cf9",
"size": "4776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59005"
}
],
"symlink_target": ""
}
|
import unittest
import os
import getpass
import pwd
from os import path
from worker_installer import init_worker_installer
from worker_installer import DEFAULT_MIN_WORKERS, DEFAULT_MAX_WORKERS
from worker_installer import FabricRunner
from worker_installer.tasks import create_celery_configuration
from cloudify.mocks import MockCloudifyContext
from cloudify.context import BootstrapContext
from cloudify.exceptions import NonRecoverableError
# for tests purposes. need a path to a file which always exists
KEY_FILE_PATH = '/bin/sh'
@init_worker_installer
def m(ctx, runner, agent_config, **kwargs):
    """Return the agent config assembled by @init_worker_installer.

    NOTE(review): ``runner`` and ``agent_config`` are injected by the
    decorator; tests call ``m(ctx, ...)`` and inspect the returned dict --
    confirm against worker_installer's decorator contract.
    """
    return agent_config
class CeleryWorkerConfigurationTest(unittest.TestCase):
def setUp(self):
os.environ['MANAGEMENT_USER'] = getpass.getuser()
def test_deployment_config(self):
ctx = MockCloudifyContext(deployment_id='deployment_id')
conf = m(ctx)
self.assertTrue('base_dir' in conf)
self.assertTrue('init_file' in conf)
self.assertTrue('config_file' in conf)
def test_vm_config_validation(self):
ctx = MockCloudifyContext(node_id='node',
properties={'cloudify_agent': {
'distro': 'Ubuntu',
'distro_codename': 'trusty'}})
self.assertRaises(NonRecoverableError, m, ctx)
ctx = MockCloudifyContext(node_id='node',
properties={'cloudify_agent': {
'distro': 'Ubuntu'},
'distro_codename': 'trusty',
'ip': '192.168.0.1'
})
self.assertRaises(NonRecoverableError, m, ctx)
ctx = MockCloudifyContext(node_id='node',
properties={
'cloudify_agent': {
'distro': 'Ubuntu',
'distro_codename': 'trusty',
'user': getpass.getuser()},
'ip': '192.168.0.1'
})
self.assertRaises(NonRecoverableError, m, ctx)
ctx = MockCloudifyContext(node_id='node',
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'key': KEY_FILE_PATH,
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty',
},
'ip': '192.168.0.1'
})
m(ctx)
def test_agent_config(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'key': KEY_FILE_PATH,
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
}
)
conf = m(ctx)
self.assertTrue('base_dir' in conf)
self.assertTrue('init_file' in conf)
self.assertTrue('config_file' in conf)
self.assertTrue('includes_file' in conf)
def test_disable_requiretty_config(self):
self._test_disable_requiretty_config('true', True)
self._test_disable_requiretty_config('false', False)
self._test_disable_requiretty_config('true', True)
self._test_disable_requiretty_config('true', True)
self._test_disable_requiretty_config(True, True)
self._test_disable_requiretty_config(False, False)
self._test_disable_requiretty_config(value=None,
should_raise_exception=True)
self._test_disable_requiretty_config(value='1234',
should_raise_exception=True)
def _test_disable_requiretty_config(self,
value=None,
expected=None,
should_raise_exception=False):
ctx = MockCloudifyContext(
deployment_id='test'
)
config = {
'disable_requiretty': value,
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
if should_raise_exception:
self.assertRaises(NonRecoverableError, m, ctx,
cloudify_agent=config)
else:
conf = m(ctx, cloudify_agent=config)
self.assertEqual(expected, conf['disable_requiretty'])
def test_autoscale_configuration(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'key': KEY_FILE_PATH,
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
}
)
conf = m(ctx)
self.assertEqual(conf['min_workers'], DEFAULT_MIN_WORKERS)
self.assertEqual(conf['max_workers'], DEFAULT_MAX_WORKERS)
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'home_dir': self._get_home_dir(),
'key': KEY_FILE_PATH,
'min_workers': 2,
'max_workers': 5,
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
}
)
conf = m(ctx)
self.assertEqual(conf['min_workers'], 2)
self.assertEqual(conf['max_workers'], 5)
def test_illegal_autoscale_configuration(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'home_dir': self._get_home_dir(),
'key': KEY_FILE_PATH,
'min_workers': 10,
'max_workers': 5,
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
}
)
self.assertRaises(NonRecoverableError, m, ctx)
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'home_dir': self._get_home_dir(),
'key': KEY_FILE_PATH,
'min_workers': 'aaa',
'max_workers': 5,
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
}
)
self.assertRaises(NonRecoverableError, m, ctx)
def test_autoscale_from_bootstrap_context(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'home_dir': self._get_home_dir(),
'key': KEY_FILE_PATH,
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'min_workers': 2,
'max_workers': 5,
}
})
)
conf = m(ctx)
self.assertEqual(conf['min_workers'], 2)
self.assertEqual(conf['max_workers'], 5)
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'home_dir': self._get_home_dir(),
'key': KEY_FILE_PATH,
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'min_workers': 0,
'max_workers': 5,
}
})
)
conf = m(ctx)
self.assertEqual(conf['min_workers'], 0)
self.assertEqual(conf['max_workers'], 5)
def test_key_from_bootstrap_context(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'user': getpass.getuser(),
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'agent_key_path': KEY_FILE_PATH
}
})
)
conf = m(ctx)
self.assertEqual(conf['key'], KEY_FILE_PATH)
def test_user_from_bootstrap_context(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty'
},
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'agent_key_path': KEY_FILE_PATH,
'user': getpass.getuser()
}
})
)
conf = m(ctx)
self.assertEqual(conf['user'], getpass.getuser())
def test_ssh_port_default(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty'
},
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'agent_key_path': KEY_FILE_PATH,
'user': getpass.getuser(),
}
})
)
conf = m(ctx)
self.assertEqual(conf['port'], 22)
def test_bad_key_path(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty'
},
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'agent_key_path': 'bad_key_path',
'user': getpass.getuser(),
}
})
)
self.assertRaises(NonRecoverableError, m, ctx)
def test_ssh_port_from_bootstrap_context(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty'
},
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'agent_key_path': KEY_FILE_PATH,
'user': getpass.getuser(),
'remote_execution_port': 2222
}
})
)
conf = m(ctx)
self.assertEqual(conf['port'], 2222)
def test_ssh_port_from_config_override_bootstrap(self):
node_id = 'node_id'
ctx = MockCloudifyContext(
deployment_id='test',
node_id=node_id,
runtime_properties={
'ip': '192.168.0.1'
},
properties={
'cloudify_agent': {
'home_dir': self._get_home_dir(),
'distro': 'Ubuntu',
'distro_codename': 'trusty',
'port': 3333
},
},
bootstrap_context=BootstrapContext({
'cloudify_agent': {
'agent_key_path': KEY_FILE_PATH,
'user': getpass.getuser(),
'remote_execution_port': 2222
}
})
)
conf = m(ctx)
self.assertEqual(conf['port'], 3333)
def test_workflows_agent_config(self):
ctx = MockCloudifyContext(
deployment_id='test',
runtime_properties={
'ip': '192.168.0.1'
}
)
config = {
'workflows_worker': 'true',
'distro': 'Ubuntu',
'distro_codename': 'trusty'
}
conf = m(ctx, cloudify_agent=config)
self.assertEqual(conf['name'], 'test_workflows')
def _get_home_dir(self):
return pwd.getpwnam(getpass.getuser()).pw_dir
class MockFabricRunner(FabricRunner):
    """FabricRunner stand-in that records uploads instead of copying files."""
    def __init__(self):
        # Intentionally does not call FabricRunner.__init__; only the
        # capture dict is needed for the assertions that inspect it.
        self.put_files = {}
    def put(self, file_path, content, use_sudo=False):
        # Record the would-be upload, keyed by its remote path.
        self.put_files[file_path] = content
class ConfigurationCreationTest(unittest.TestCase):
    """Tests for building the celery configuration files for an agent."""
    def setUp(self):
        # The configuration builder reads these values from the environment.
        os.environ['MANAGEMENT_USER'] = getpass.getuser()
        os.environ['MANAGER_REST_PORT'] = '8100'
        os.environ['MANAGEMENT_IP'] = '192.168.0.1'
        os.environ['AGENT_IP'] = '192.168.0.2'
    def read_file(self, file_name):
        """Read a fixture file located next to this test module."""
        file_path = path.join(path.dirname(__file__), file_name)
        with open(file_path, 'r') as f:
            return f.read()
    def get_resource(self, resource_name):
        """Resolve celery resource templates to the local Ubuntu fixtures."""
        if 'celeryd-cloudify.init' in resource_name:
            return self.read_file('Ubuntu-celeryd-cloudify.init.jinja2')
        elif 'celeryd-cloudify.conf' in resource_name:
            return self.read_file('Ubuntu-celeryd-cloudify.conf.jinja2')
        return None
    def test_prepare_configuration(self):
        ctx = MockCloudifyContext(deployment_id='deployment_id')
        agent_config = m(ctx)
        runner = MockFabricRunner()
        create_celery_configuration(ctx,
                                    runner,
                                    agent_config,
                                    self.get_resource)
        # assertEqual/assertIn: assertEquals is a deprecated alias and
        # assertIn gives clearer failure messages than assertTrue(x in y).
        self.assertEqual(3, len(runner.put_files))
        self.assertIn(agent_config['init_file'], runner.put_files)
        self.assertIn(agent_config['config_file'], runner.put_files)
        self.assertIn(agent_config['includes_file'], runner.put_files)
|
{
"content_hash": "658f4a710b1114d1d0b82b395e2c0b18",
"timestamp": "",
"source": "github",
"line_count": 476,
"max_line_length": 75,
"avg_line_length": 35.04201680672269,
"alnum_prop": 0.4505395683453237,
"repo_name": "konradxyz/dev_fileserver",
"id": "9d083d46ea1336ca4d891c2b1c3dd5be1505d5da",
"size": "17318",
"binary": false,
"copies": "4",
"ref": "refs/heads/cloudify_manager",
"path": "plugins/agent-installer/worker_installer/tests/test_configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "3350"
},
{
"name": "Python",
"bytes": "845025"
},
{
"name": "Shell",
"bytes": "17435"
}
],
"symlink_target": ""
}
|
import itertools
from types import MappingProxyType
from ..util import superscript, tuplize, ImmutableBase
from .grammar import UnionExp, TypeExp
from .collection import Tuple
class TypeVarExp(UnionExp):
    """A type variable: a union of candidate branches tied to a TypeMap.

    Identity is defined by the owning map and the variable's positional
    ``index`` within that map, not by the member expressions themselves.
    """
    def __init__(self, members, tmap, input=False, output=False, index=None):
        # tmap: the TypeMap this variable belongs to.
        # index: position of this variable among the map's inputs/outputs.
        self.mapping = tmap
        self.input = input
        self.output = output
        self.index = index
        super().__init__(members)
    def __repr__(self):
        # Tag repeated members with superscript occurrence numbers so that
        # duplicate branches remain distinguishable in the repr.
        numbers = {}
        for idx, m in enumerate(self.members, 1):
            if m in numbers:
                numbers[m] += superscript(',' + str(idx))
            else:
                numbers[m] = superscript(idx)
        return " | ".join([repr(k) + v for k, v in numbers.items()])
    def uniq_upto_sub(self, a_expr, b_expr):
        """
        Two elements are unique up to a subtype if they are indistinguishable
        with respect to that subtype. In the case of a type var, that means
        the same branches must be "available" in the type map.
        This means that A or B may have additional refinements (or may even be
        subtypes of each other), so long as that does not change the branch
        chosen by the type map.
        """
        a_branches = [m for m in self.members if a_expr <= m]
        b_branches = [m for m in self.members if b_expr <= m]
        return a_branches == b_branches
    def __eq__(self, other):
        # Equality is (map, index) identity, not equality of member sets.
        return (type(self) is type(other)
                and self.index == other.index
                and self.mapping == other.mapping)
    def __hash__(self):
        return hash(self.index) ^ hash(self.mapping)
    def is_concrete(self):
        # A variable is never a concrete type.
        return False
    def can_intersect(self):
        return False
    def get_union_membership_expr(self, self_expr):
        return None
    def _is_subtype_(self, other):
        # A variable is a subtype only if every branch would be.
        return all(m <= other for m in self.members)
    def _is_supertype_(self, other):
        # A variable is a supertype if any branch could be.
        return any(m >= other for m in self.members)
    def __iter__(self):
        yield from self.members
    def unpack_union(self):
        # A variable presents itself as a single union member.
        yield self
    def to_ast(self):
        # "group" uses id() of the map so variables of one map share a group.
        # NOTE(review): the key is named "outputs" but carries input_width()
        # -- confirm against consumers of this AST.
        return {
            "type": "variable",
            "index": self.index,
            "group": id(self.mapping),
            "outputs": self.mapping.input_width(),
            "mapping": [
                ([k.to_ast() for k in key.fields]
                 + [v.to_ast() for v in value.fields])
                for key, value in self.mapping.lifted.items()]
        }
class TypeMap(ImmutableBase):
    """An ordered, immutable mapping from input type tuples to output tuples.

    Branch order is significant: ``solve`` returns the first branch that
    matches, and construction rejects orderings whose overlaps would make
    that resolution ambiguous.
    """
    def __init__(self, mapping):
        # Lift every key/value to a Tuple so that single types and tuples
        # of types are handled uniformly.
        mapping = {Tuple[tuplize(k)]: Tuple[tuplize(v)]
                   for k, v in mapping.items()}
        branches = list(mapping)
        # Validate ordering: if two branches overlap only partially, some
        # earlier branch must already cover the overlap, otherwise inputs
        # in the overlap could match either branch.
        for i, a in enumerate(branches):
            for j in range(i, len(branches)):
                b = branches[j]
                try:
                    intersection = a & b
                except TypeError:
                    raise ValueError("Cannot place %r and %r in the same "
                                     "type variable." % (a, b))
                if (intersection.is_bottom()
                        or intersection is a or intersection is b):
                    # Disjoint, or one branch contains the other: fine.
                    continue
                for k in range(i):
                    if intersection <= branches[k]:
                        break
                else:
                    raise ValueError(
                        "Ambiguous resolution for invocations with type %r."
                        " Could match %r or %r, add a new branch ABOVE these"
                        " two (or modify these branches) to correct this."
                        % (intersection.fields, a.fields, b.fields))
        self.__lifted = mapping
        super()._freeze_()
    @property
    def lifted(self):
        # Read-only view of the validated branch mapping.
        return MappingProxyType(self.__lifted)
    def __eq__(self, other):
        # Identity semantics: every TypeMap is its own equivalence group.
        return self is other
    def __hash__(self):
        return hash(id(self))
    def __iter__(self):
        # Yield one input variable per input position, then the outputs.
        for idx, members in enumerate(
                zip(*(k.fields for k in self.lifted.keys()))):
            yield TypeVarExp(members, self, input=True, index=idx)
        yield from self.iter_outputs()
    def solve(self, *inputs):
        """Return the output fields of the first branch matching *inputs*,
        or None when no branch matches."""
        inputs = Tuple[inputs]
        for branch, outputs in self.lifted.items():
            if inputs <= branch:
                return outputs.fields
    def input_width(self):
        # Number of input positions; all branches share the same width.
        return len(next(iter(self.lifted.keys())).fields)
    def iter_outputs(self, *, _double_as_input=False):
        # Output variables are indexed after the inputs so indices stay
        # unique across the whole map.
        start = self.input_width()
        for idx, members in enumerate(
                zip(*(v.fields for v in self.lifted.values())), start):
            yield TypeVarExp(members, self, output=True, index=idx,
                             input=_double_as_input)
def _get_intersections(listing):
intersections = []
for a, b in itertools.combinations(listing, 2):
i = a & b
if i.is_bottom() or i is a or i is b:
continue
intersections.append(i)
return intersections
def TypeMatch(listing):
    """Build a single input/output type variable covering *listing*.

    All (transitive) intersections of the branches are inserted ahead of
    the original branches, most specific first, so that overlapping inputs
    resolve unambiguously in the resulting TypeMap.
    """
    listing = list(listing)
    extra = []
    pending = _get_intersections(listing)
    while pending:
        extra.extend(pending)
        pending = _get_intersections(pending)
    branches = list(reversed(extra)) + listing
    mapping = TypeMap({branch: branch for branch in branches})
    # TypeMatch only produces a single variable
    # iter_outputs is used by match for solving, so the index must match
    return next(iter(mapping.iter_outputs(_double_as_input=True)))
def select_variables(expr):
    """When called on an expression, will yield selectors to the variable.
    A selector will either return the variable (or equivalent fragment) in
    an expression, or will return an entirely new expression with the
    fragment replaced with the value of `swap`.
    e.g.
    >>> from qiime2.core.type.tests.test_grammar import (MockTemplate,
    ...                                                  MockPredicate)
    >>> Example = MockTemplate('Example', fields=('x',))
    >>> Foo = MockTemplate('Foo')
    >>> Bar = MockPredicate('Bar')
    >>> T = TypeMatch([Foo])
    >>> U = TypeMatch([Bar])
    >>> select_u, select_t = select_variables(Example[T] % U)
    >>> t = select_t(Example[T] % U)
    >>> assert T is t
    >>> u = select_u(Example[T] % U)
    >>> assert U is u
    >>> frag = select_t(Example[Foo] % Bar)
    >>> assert frag is Foo
    >>> new_expr = select_t(Example[T] % U, swap=frag)
    >>> assert new_expr == Example[Foo] % U
    """
    # Base case 1: the expression itself is a variable.
    if type(expr) is TypeVarExp:
        def select(x, swap=None):
            if swap is not None:
                return swap
            return x
        yield select
        return
    # Only TypeExp nodes can contain nested variables; anything else
    # (e.g. a plain union) holds none.
    if type(expr) is not TypeExp:
        return
    # Base case 2: the predicate slot holds a variable.
    if type(expr.full_predicate) is TypeVarExp:
        def select(x, swap=None):
            if swap is not None:
                return x.duplicate(predicate=swap)
            return x.full_predicate
        yield select
    # Recurse into the fields, wrapping each inner selector so that it
    # applies at the correct field index of the outer expression.
    for idx, field in enumerate(expr.fields):
        for sel in select_variables(field):
            # Without this closure, the idx in select will be the last
            # value of the enumerate, same for sel
            # (Same problem as JS with callbacks inside a loop)
            def closure(idx, sel):
                def select(x, swap=None):
                    if swap is not None:
                        new_fields = list(x.fields)
                        new_fields[idx] = sel(x.fields[idx], swap)
                        return x.duplicate(fields=tuple(new_fields))
                    return sel(x.fields[idx])
                return select
            yield closure(idx, sel)
def match(provided, inputs, outputs):
    """Bind the type variables in *inputs* against the concrete *provided*
    types, solve each TypeMap, and substitute the solutions into *outputs*.

    :param provided: dict of key -> concrete type expression.
    :param inputs: dict of key -> (possibly variable-bearing) expression.
    :param outputs: dict of key -> expression to rewrite with the solutions.
    :raises ValueError: when bindings for a re-used variable conflict, when
        a map's input variables are not all bound, or when no branch of a
        map accepts the provided combination.
    """
    provided_binding = {}
    error_map = {}
    for key, expr in inputs.items():
        for selector in select_variables(expr):
            var = selector(expr)
            provided_fragment = selector(provided[key])
            try:
                current_binding = provided_binding[var]
            except KeyError:
                # First sighting of this variable: record the binding and
                # keep the full provided type around for error messages.
                provided_binding[var] = provided_fragment
                error_map[var] = provided[key]
            else:
                # A re-used variable must select the same branch each time.
                if not var.uniq_upto_sub(current_binding, provided_fragment):
                    raise ValueError("Received %r and %r, but expected %r"
                                     " and %r to match (or to select the same"
                                     " output)."
                                     % (error_map[var], provided[key],
                                        current_binding, provided_fragment))
    # provided_binding now maps TypeVarExp instances to a TypeExp instance
    # which is the relevant fragment from the provided input types
    grouped_maps = {}
    for item in provided_binding.items():
        var = item[0]
        if var.mapping not in grouped_maps:
            grouped_maps[var.mapping] = [item]
        else:
            grouped_maps[var.mapping].append(item)
    # grouped_maps now maps a TypeMap instance to tuples of
    # (TypeVarExp, TypeExp) which are the items of provided_binding
    # i.e. all of the bindings are now grouped under their shared type maps
    output_fragments = {}
    for mapping, group in grouped_maps.items():
        if len(group) != mapping.input_width():
            raise ValueError("Missing input variables")
        inputs = [x[1] for x in sorted(group, key=lambda x: x[0].index)]
        solved = mapping.solve(*inputs)
        if solved is None:
            provided = tuple(error_map[x[0]]
                             for x in sorted(group, key=lambda x: x[0].index))
            raise ValueError("No solution for inputs: %r, check the signature "
                             "to see valid combinations." % (provided,))
        # type vars share identity by instance of map and index, so we will
        # be able to see the "same" vars again when looking up the outputs
        for var, out in zip(mapping.iter_outputs(), solved):
            output_fragments[var] = out
    # output_fragments now maps a TypeVarExp to a TypeExp which is the solved
    # fragment for the given output type variable
    results = {}
    for key, expr in outputs.items():
        r = expr  # output may not have a typevar, so default is the expr
        for selector in select_variables(expr):
            var = selector(expr)
            r = selector(r, swap=output_fragments[var])
        results[key] = r
    # results now maps a key to a full TypeExp as solved by the inputs
    return results
|
{
"content_hash": "2b5c075ec3445349963aceaee65100b1",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 79,
"avg_line_length": 35.180602006688964,
"alnum_prop": 0.5587983648635801,
"repo_name": "qiime2/qiime2",
"id": "c33385641b6fbe4df1436cbd80347020deb0ce52",
"size": "10869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qiime2/core/type/meta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "640"
},
{
"name": "Python",
"bytes": "1217048"
},
{
"name": "Shell",
"bytes": "217"
},
{
"name": "TeX",
"bytes": "5480"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import mock
import unittest
import pandas
import datalab.stackdriver.commands._monitoring as monitoring_commands
PROJECT = 'my-project'
class TestCases(unittest.TestCase):
    """Unit tests for the stackdriver monitoring list commands."""
    @mock.patch('datalab.stackdriver.commands._monitoring._render_dataframe')
    @mock.patch('datalab.stackdriver.monitoring.MetricDescriptors')
    def test_list_metric_descriptors(self, mock_metric_descriptors, mock_render_dataframe):
        metric_types = ['compute.googleapis.com/instances/cpu/utilization',
                        'compute.googleapis.com/instances/cpu/usage_time']
        dataframe = pandas.DataFrame(metric_types, columns=['Metric type'])
        pattern = 'compute*cpu*'
        descriptors = mock_metric_descriptors.return_value
        descriptors.as_dataframe.return_value = dataframe
        monitoring_commands._list_metric_descriptors(
            {'project': PROJECT, 'type': pattern}, None)
        # The command must target the right project, filter with the
        # requested pattern, and render the resulting dataframe.
        mock_metric_descriptors.assert_called_once_with(project_id=PROJECT)
        descriptors.as_dataframe.assert_called_once_with(pattern=pattern)
        mock_render_dataframe.assert_called_once_with(dataframe)
    @mock.patch('datalab.stackdriver.commands._monitoring._render_dataframe')
    @mock.patch('datalab.stackdriver.monitoring.ResourceDescriptors')
    def test_list_resource_descriptors(self, mock_resource_descriptors, mock_render_dataframe):
        resource_types = ['gce_instance', 'aws_ec2_instance']
        dataframe = pandas.DataFrame(resource_types, columns=['Resource type'])
        pattern = '*instance*'
        descriptors = mock_resource_descriptors.return_value
        descriptors.as_dataframe.return_value = dataframe
        monitoring_commands._list_resource_descriptors(
            {'project': PROJECT, 'type': pattern}, None)
        mock_resource_descriptors.assert_called_once_with(project_id=PROJECT)
        descriptors.as_dataframe.assert_called_once_with(pattern=pattern)
        mock_render_dataframe.assert_called_once_with(dataframe)
    @mock.patch('datalab.stackdriver.commands._monitoring._render_dataframe')
    @mock.patch('datalab.stackdriver.monitoring.Groups')
    def test_list_groups(self, mock_groups, mock_render_dataframe):
        group_ids = ['GROUP-205', 'GROUP-101']
        dataframe = pandas.DataFrame(group_ids, columns=['Group ID'])
        pattern = 'GROUP-*'
        groups = mock_groups.return_value
        groups.as_dataframe.return_value = dataframe
        monitoring_commands._list_groups(
            {'project': PROJECT, 'name': pattern}, None)
        mock_groups.assert_called_once_with(project_id=PROJECT)
        groups.as_dataframe.assert_called_once_with(pattern=pattern)
        mock_render_dataframe.assert_called_once_with(dataframe)
|
{
"content_hash": "29728696a4ec7a5493562905d6b0fce6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 93,
"avg_line_length": 42.265625,
"alnum_prop": 0.744547134935305,
"repo_name": "supriyagarg/pydatalab",
"id": "1361712f11897a8265f67c7508507e34fba249e0",
"size": "3294",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "legacy_tests/stackdriver/commands/monitoring_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3798"
},
{
"name": "Python",
"bytes": "767068"
},
{
"name": "Shell",
"bytes": "2456"
},
{
"name": "TypeScript",
"bytes": "50852"
}
],
"symlink_target": ""
}
|
import datetime
import json
from twisted.web import http
from twisted.internet import defer
from twisted.web import client
from zope import interface
from piped import util, exceptions, processing, yamlutil
from piped.processors import base
class HttpRequestProcessor(base.Processor):
    """Base class for processors that act on a twisted web request found
    inside the baton at *request_path*."""
    def __init__(self, request_path='request', skip_if_request_stopped=True, **kw):
        super(HttpRequestProcessor, self).__init__(**kw)
        self.request_path = request_path
        self.skip_if_request_stopped = skip_if_request_stopped
    def process(self, baton):
        """Fetch the request and delegate to :meth:`process_request`."""
        web_request = self.get_request_or_fail(baton)
        # A request without a channel has been stopped/disconnected.
        if self.skip_if_request_stopped and not web_request.channel:
            return baton
        return self.process_request(web_request, baton)
    def get_request_or_fail(self, baton):
        """Return the request from the baton, raising if it is missing."""
        web_request = util.dict_get_path(baton, self.request_path)
        if not web_request:
            self._fail_because_request_is_invalid()
        return web_request
    def _fail_because_request_is_invalid(self):
        raise exceptions.PipedError('could not find request at %r' % self.request_path)
    def process_request(self, request, baton):
        """Subclass hook: handle the request."""
        raise NotImplementedError()
class ResponseWriter(HttpRequestProcessor):
    """ A processor that writes the response to a twisted.web.server.Request
    :param response_code: Either an integer response code or a string. If a string is
    supplied, it is converted to an integer by looking up the response codes
    defined in twisted.web.http during initialization.
    :type response_code: int or str
    """
    interface.classProvides(processing.IProcessor)
    name = 'write-web-response'
    def __init__(self, content_path='content', content_type=None, response_code=None,
                 encoding='utf8', finish=True, fallback_content=Ellipsis, **kw):
        # fallback_content=Ellipsis is a sentinel meaning "write nothing
        # when the content path is missing" (None is a legal content value).
        super(ResponseWriter, self).__init__(**kw)
        self.content_path = content_path
        self.content_type = content_type
        self.finish = finish
        self.fallback_content = fallback_content
        self.encoding = encoding
        # Python 2 code: basestring covers both str and unicode. Unknown
        # names resolve to Ellipsis, which fails validation below.
        if isinstance(response_code, basestring):
            response_code = getattr(http, response_code, Ellipsis)
        self._fail_if_response_code_is_invalid(response_code)
        self.response_code = response_code
    def _fail_if_response_code_is_invalid(self, response_code):
        """Raise ConfigurationError (listing valid names) unless
        *response_code* is None or an int."""
        if response_code is not None and not isinstance(response_code, int):
            e_msg = 'Invalid response code.'
            hint = 'A response code must be either an integer, or a valid string.'
            valid_strings = set()
            # Collect the symbolic code names defined by twisted.web.http.
            for code_name in dir(http):
                code = getattr(http, code_name, Ellipsis)
                if isinstance(code, int) and code in http.RESPONSES:
                    valid_strings.add('"%s"' % code_name)
            detail = 'Valid strings are: %s.' % (','.join(valid_strings))
            raise exceptions.ConfigurationError(e_msg, hint, detail)
    def process_request(self, request, baton):
        """Write the configured code/headers/content and optionally finish
        the request; the baton is returned unchanged."""
        content = util.dict_get_path(baton, self.content_path, self.fallback_content)
        if self.response_code:
            request.setResponseCode(self.response_code)
        if self.content_type:
            request.setHeader('content-type', self.content_type)
        # unicode must be encoded before being written to the transport.
        if isinstance(content, unicode):
            content = content.encode(self.encoding)
        if content is not Ellipsis:
            request.write(content)
        if self.finish:
            request.finish()
        return baton
class IPDeterminer(HttpRequestProcessor):
    """ Determine the IP of the HTTP-client.
    If *proxied* is true, then the *proxy_header*, which defaults to
    "x-forwarded-for", is used to get the IP.
    If an IP is not found at the proxy header, the client-IP is
    returned --- unless *fail_if_not_proxied* is true, in which case
    a `PipedError` is raised.
    """
    interface.classProvides(processing.IProcessor)
    name = 'determine-ip'
    def __init__(self, output_path='ip',
                 proxied=False, proxy_header='x-forwarded-for', fail_if_not_proxied=False, **kw):
        # A stopped request still has a determinable IP, so never skip.
        kw.setdefault('skip_if_request_stopped', False)
        super(IPDeterminer, self).__init__(**kw)
        self.output_path = output_path
        self.proxied = proxied
        self.proxy_header = proxy_header
        self.fail_if_not_proxied = fail_if_not_proxied
    def process_request(self, request, baton):
        util.dict_set_path(baton, self.output_path, self._determine_ip(request))
        return baton
    def _determine_ip(self, request):
        if not self.proxied:
            return request.getClientIP()
        # Prefer the proxy header; it may hold a comma-separated chain of
        # addresses, of which the first is the originating client.
        header_value = request.getHeader(self.proxy_header)
        if header_value:
            return header_value.split(',')[0].strip()
        if self.fail_if_not_proxied:
            e_msg = 'could not determine IP from proxy-header'
            detail = 'The proxy header is "%s"' % self.proxy_header
            hint = ('Ensure the proxy-header is right, set "proxied" to false if the service is '
                    'no longer proxied, or set "fail_if_not_proxied" to false to fallback to the client IP.')
            raise exceptions.PipedError(e_msg, detail, hint)
        return request.getClientIP()
class SetHttpHeaders(HttpRequestProcessor):
    """ Adds *headers* as response headers. """
    interface.classProvides(processing.IProcessor)
    name = 'set-http-headers'
    def __init__(self, headers, **kw):
        super(SetHttpHeaders, self).__init__(**kw)
        self.headers = headers
    def process_request(self, request, baton):
        # Apply every configured header to the outgoing response.
        for header_name, header_value in self.headers.items():
            request.setHeader(header_name, header_value)
        return baton
class SetExpireHeader(HttpRequestProcessor):
    """ Set cache headers to indicate that the response should be
    cached for *timedelta* seconds.
    :param timedelta: a dictionary with the keys *days*, *hours*,
    *minutes* and *seconds*. The resulting timedelta is the sum of
    these.
    """
    interface.classProvides(processing.IProcessor)
    name = 'set-http-expires'
    def __init__(self, timedelta, **kw):
        super(SetExpireHeader, self).__init__(**kw)
        self.timedelta_kwargs = timedelta
    def process_request(self, request, baton):
        delta = datetime.timedelta(**self.timedelta_kwargs)
        total_seconds = delta.days * 86400 + delta.seconds
        expires_at = datetime.datetime.now() + delta
        # %Z is empty for naive datetimes, hence the trailing .strip().
        request.setHeader('expires', expires_at.strftime('%a, %d %b %Y %H:%M:%S %Z').strip())
        request.setHeader('cache-control', 'public,max-age=%i' % total_seconds)
        return baton
class ExtractRequestArguments(base.MappingProcessor):
    """ Extract arguments from a :class:`twisted.web.server.Request`-like object.
    The input paths in the mapping is lookup up in the request arguments and
    copied to the specified output paths.
    The mapping support the following additional keywords:
    only_first
        Only returns the first request argument by that name. Defaults to True.
    load_json
        Causes the value to be loaded as json before being copied into the baton.
        Defaults to False.
    Consider the following example configuration:
    .. code-block:: yaml
        mapping:
            - foo
            - bar:
                only_first: false
            - baz:
                load_json: true
            - zip:
                output_path: zap
    Using the above configuration to extract the request arguments of a request to
    ``http://.../?foo=1&foo=2&bar=3&bar=4&baz={"test":[5,6,7]}&zip=8`` results in the following baton:
    .. code-block:: yaml
        request: <Request object>
        foo: '1'
        bar: ['3', '4']
        baz:
            test: [5, 6, 7]
        zap: '8'
    Note that the integers in the request are not parsed. For more advanced input validation, see
    the :ref:`validate-with-formencode` processor.
    """
    interface.classProvides(processing.IProcessor)
    name = 'extract-web-request-arguments'
    def __init__(self, request_path='request', *a, **kw):
        """
        :param request_path: Path to the request object in the baton.
        :param skip_if_nonexistent: Whether to skip mapping entries that are not found in the request.
        """
        super(ExtractRequestArguments, self).__init__(*a, **kw)
        self.request_path = request_path
    def get_input(self, baton, input_path, **kwargs):
        # Look the argument up by name; Ellipsis marks "not present".
        request = util.dict_get_path(baton, self.request_path)
        return request.args.get(input_path, Ellipsis)
    def process_mapping(self, input, input_path, output_path, baton, only_first=True, load_json=False):
        # we have to recheck if the input_path is in the request arguments, otherwise we don't know
        # whether the input is a default provided by our configuration or an actual argument.
        request = util.dict_get_path(baton, self.request_path)
        if input_path not in request.args:
            return input
        if load_json:
            # NOTE(review): decodes in place, mutating the request's own
            # argument list -- confirm this sharing is intended.
            for i, value in enumerate(input):
                input[i] = json.loads(value)
        if only_first:
            return input[0]
        return input
class ClientGetPage(base.Processor):
    """ A simple web client agent for simple HTTP requests. """
    interface.classProvides(processing.IProcessor)
    name = 'web-client-get-page'
    def __init__(self, base_url=None, url=yamlutil.BatonPath('url'), method='GET', headers=None, agent=None, timeout=0, cookies=None,
                 follow_redirect=True, redirect_limit=20, after_found_get=False, output_path='page', postdata=None, *a, **kw):
        """
        If any of the following arguments resolve to a callable, it is called without any arguments and the return value is used.
        :param base_url: A string that is prepended to the given url.
        :param url: If url is a list, it is flattened to a string by joining with '/'
        :param method: The HTTP method to use in the request.
        :param headers: Dict of headers.
        :param agent: Client agent string.
        :param timeout: Set a max
        :param cookies: Dict of cookies
        :param follow_redirect: Whether to follow redirects.
        :param redirect_limit: The maximum number of HTTP redirects that can occur before it is assumed that the redirection is endless
        :param after_found_get: Deviate from the HTTP 1.1 RFC by handling redirects the same way as most web browsers; if the request
            method is POST and a 302 status is encountered, the redirect is followed with a GET method
        :param postdata: Data to post. If it is a buffer (has a callable .read(), postdata.read() is called and the result is used.
        :param output_path: Path to use for the page.
        :return:
        """
        super(ClientGetPage, self).__init__(*a, **kw)
        self.base_url = base_url
        # Stored unresolved; values are resolved per-baton in process().
        self.kwargs = dict(
            url = url,
            method = method,
            headers = headers,
            agent = agent,
            timeout = timeout,
            cookies = cookies,
            followRedirect = follow_redirect,
            redirectLimit = redirect_limit,
            afterFoundGet = after_found_get,
            postdata = postdata
        )
        self.output_path = output_path
    @defer.inlineCallbacks
    def process(self, baton):
        """Resolve the configured values, fetch the page and store it at
        *output_path* in the baton."""
        kwargs = self.kwargs.copy()
        # Each value may be a baton path, a callable and/or a deferred;
        # reduce all of them to concrete values before issuing the request.
        for key, value in kwargs.items():
            value = self.get_input(baton, value)
            if hasattr(value, '__call__'):
                value = yield value()
            kwargs[key] = yield value
        postdata = kwargs['postdata']
        # Buffer-like postdata: send its contents, not the object itself.
        if hasattr(postdata, 'read') and hasattr(postdata.read, '__call__'):
            kwargs['postdata'] = yield postdata.read()
        # prepend the base url and ensure flatten the url argument in case the url argument is a list (i.e !path request.postpath)
        base_url = self.get_input(baton, self.base_url) or ''
        kwargs['url'] = base_url + self._flatten(kwargs['url'])
        response = yield client.getPage(**kwargs)
        baton = self.get_resulting_baton(baton, self.output_path, response)
        defer.returnValue(baton)
    def _flatten(self, string_or_list, separator='/'):
        # A list url (e.g. request.postpath) is joined into a single path.
        if isinstance(string_or_list, list):
            return separator.join(string_or_list)
        return string_or_list
|
{
"content_hash": "0fb4c5609456703925cde7cdb383ae17",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 135,
"avg_line_length": 37.28059701492537,
"alnum_prop": 0.6303947473776924,
"repo_name": "foundit/Piped",
"id": "6c637c66f8a8c332ff461aab8f4fde9997d41182",
"size": "12588",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "piped/processors/web_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20464"
},
{
"name": "Python",
"bytes": "1084679"
}
],
"symlink_target": ""
}
|
from __init__ import read_pos, coordinate_correct
from image import Image
def matrix(name, x_top_left, y_top_left):
    """ Build the final perspective transformation matrix in a string.
    Parameters
    ============
    name : String
        The name of the image, such as '1.23.tif'
    x_top_left : float
        The x coordinate of the origin of the layout
    y_top_left : float
        The y coordinate of the origin of the layout
    Returns
    ============
    landmarks : string
        The corrected coordinates of the four landmarks, comma-joined
    matrix : string
        The perspective matrix in a string: a row after the other
    """
    # NOTE(review): the original converted this result to strings without
    # using it; the call is kept in case read_pos has side effects -- TODO
    # confirm it can be dropped entirely.
    read_pos(name, x_top_left, y_top_left)
    imag = Image(name,
                 x_top_left,
                 y_top_left,
                 landmarks=None,
                 matrix=None)
    lm = imag.get_landmarks()
    size = imag.get_imagesize()
    xy_corrected = coordinate_correct(size, lm)
    landmarks = ','.join(str(num) for num in xy_corrected)
    # Bug fix: the original joined undefined names (mx_list0/mx_list1/
    # mx_list2), raising NameError; join the actual matrix rows instead.
    ptMatrix = imag.get_matrix()
    rows = [','.join(str(num) for num in ptMatrix[i]) for i in range(3)]
    final_matrix = '(' + rows[0] + ') (' + rows[1] + ') (' + rows[2] + ')'
    return landmarks, final_matrix
|
{
"content_hash": "fcdda0a55cc9ce64501c8aa72e272df3",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 81,
"avg_line_length": 32.04,
"alnum_prop": 0.5736579275905118,
"repo_name": "suzhaoen/pkout",
"id": "36c5d9f0fd1d613f23f017d5e96d1898fa714129",
"size": "1602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18066"
}
],
"symlink_target": ""
}
|
try:
from ucollections import namedtuple
except:
from collections import namedtuple
# test http request for parameters required by rfc6455
# indicating request is request to create websocket
def is_websocket_request(request):
    """Return True when *request* carries an RFC 6455 websocket handshake."""
    opts = request.options
    # All of these headers must simply be present.
    for required in ("Sec-WebSocket-Key", "Origin", "Host"):
        if required not in opts:
            return False
    # These must be present with specific (case-insensitive) values.
    if opts.get("Upgrade", "").lower() != "websocket":
        return False
    if opts.get("Connection", "").lower() != "upgrade":
        return False
    if opts.get("Sec-WebSocket-Version") != "13":
        return False
    # Websockets require HTTP/1.1 or later.
    return request.ver.major >= 1 and request.ver.minor >= 1
# Parsed HTTP request: method/uri/version plus the header dict and optional body.
Request = namedtuple("Request", ("method", "uri", "ver", "options", "data"))
# HTTP protocol version split into integer components, e.g. HTTP/1.1 -> (1, 1).
HttpVer = namedtuple("HttpVer", ("major", "minor"))
def request(req, options, data=None):
    """Parse a raw request line (bytes) into a Request tuple."""
    method, raw_path, raw_ver = str(req.strip(), "utf-8").split(" ")
    # "HTTP/1.1" -> ("1", "1") -> HttpVer(1, 1)
    version_parts = raw_ver.split("/")[1].split(".")
    return Request(
        method,
        uri(raw_path),
        HttpVer(*(int(part) for part in version_parts)),
        options,
        data
    )
# browser does not send uri fragments to server
Uri = namedtuple("Uri", ("path", "file", "query"))
def uri(uri):
    """Split a request path into (path, file, query) components."""
    has_dot = uri.count(".") > 0
    # A dot after the last slash means the final segment is a file name.
    if uri.count("/") and has_dot and uri.rfind(".") > uri.rfind("/"):
        path, file = uri.rsplit("/", 1)
    elif not has_dot:
        path, file = uri, ""
    else:
        path, file = "", uri
    # Drop a single leading slash from the directory part.
    if path.startswith("/"):
        path = path[1:]
    # assume uri is valid and only contains one ?
    if "?" in file:
        file, query = file.split("?")
    else:
        query = ""
    return Uri(path, file, query)
|
{
"content_hash": "b181039e6141da2dbc4f95e7b31fff61",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 30.896551724137932,
"alnum_prop": 0.5368303571428571,
"repo_name": "PinkInk/upylib",
"id": "e0d35baf73eb6d4be31efedaeb2e91ce90591206",
"size": "1792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http.bak/http/parse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2998"
},
{
"name": "Python",
"bytes": "158212"
}
],
"symlink_target": ""
}
|
import os
def is_os_64bit():
    """Return True when Windows reports an amd64 processor architecture.

    PROCESSOR_ARCHITEW6432 is consulted as well, since it is what a 32-bit
    process sees on a 64-bit Windows installation.
    """
    arch_values = (
        (os.environ.get('PROCESSOR_ARCHITECTURE') or "").lower(),
        (os.environ.get('PROCESSOR_ARCHITEW6432') or "").lower(),
    )
    return "amd64" in arch_values
def set_msvc_environment(installDir, arch):
    """Point INCLUDE/LIB/PATH at the MSVC toolchain under *installDir*.

    arch must be "x86" or "amd64"; for amd64, the native 64-bit compiler is
    preferred when the OS is 64-bit and cl.exe exists, otherwise the
    x86_amd64 cross-compiler is used.
    """
    previous_path = os.environ.get('PATH') or ""
    os.environ["INCLUDE"] = "%s\\VC\\include" % installDir
    if arch == "x86":
        os.environ['LIB'] = "%s\\VC\\lib" % installDir
        bin_dir = "%s\\VC\\bin" % installDir
    elif arch == "amd64":
        os.environ['LIB'] = "%s\\VC\\lib\\amd64" % installDir
        if is_os_64bit() and os.path.exists("%s\\VC\\bin\\amd64\\cl.exe" % installDir):
            bin_dir = "%s\\VC\\bin\\amd64" % installDir
        else:
            bin_dir = "%s\\VC\\bin\\x86_amd64" % installDir
    else:
        raise Exception("Unexpected arch ... should not be reachable ...")
    os.environ['PATH'] = "%s;%s\\Common7\\IDE;%s" % (bin_dir, installDir, previous_path)
|
{
"content_hash": "22809413d6b263e18c8dc26b046bc9af",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 116,
"avg_line_length": 43.958333333333336,
"alnum_prop": 0.5914691943127962,
"repo_name": "fifoforlifo/pynja",
"id": "6db8790bee9bf31f4138e29ee0a9eedb53b1fa61",
"size": "1055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/pynja/scripts/msvc_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1431"
},
{
"name": "C++",
"bytes": "2971018"
},
{
"name": "Java",
"bytes": "701"
},
{
"name": "Protocol Buffer",
"bytes": "353085"
},
{
"name": "Python",
"bytes": "190328"
},
{
"name": "Shell",
"bytes": "5634"
}
],
"symlink_target": ""
}
|
# Maps a module-style (underscored) name to its dashed package name.
match = {'django_openid': 'django-openid'}
# Project homepages, keyed by the dashed package name.
urls = {
    'django-tagging': 'http://code.google.com/p/django-tagging/',
    'django-djpcms': 'http://djpcms.com',
    'django-openid': 'http://github.com/simonw/django-openid'
}
|
{
"content_hash": "413b41d5838cff42dc0a3139bd9aa2b7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 35,
"alnum_prop": 0.5918367346938775,
"repo_name": "strogo/djpcms",
"id": "d98cb018d862a3002603ed1d0b212f2efc3b040d",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djpcms/utils/appurls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import division
module_name = "didyoumean"  # NOTE(review): presumably read by the bot's module loader -- confirm
def unique_sorted(s):
    """Return the distinct characters of *s*, sorted, as a string."""
    return ''.join(sorted(frozenset(s)))
def did_you_mean(given, cmd_name_list):
    """Return the best fuzzy match for *given* from *cmd_name_list*.

    A candidate scores one point per distinct character of *given* it
    contains; it qualifies when the score reaches half its own length.
    Returns None when nothing qualifies.
    """
    given = ''.join(sorted(set(given.lower())))
    best_name, best_score = "", 0
    for candidate in cmd_name_list:
        letters = ''.join(sorted(set(candidate)))
        score = sum(1 for ch in given if ch in letters)
        # Strict '>' keeps the earliest candidate on score ties.
        if score >= len(candidate) / 2 and score > best_score:
            best_name, best_score = candidate, score
    return best_name if best_score else None
def on_bot_load(bot):
    """Wrap bot.command so unknown commands get a fuzzy-match suggestion."""
    original_command = bot.command
    def command_with_didyoumean(cmd, msg, event):
        result = original_command(cmd, msg, event)
        if result != "Command not found.":
            return result
        candidates = [command.name for command in bot.modules.list_commands()]
        suggestion = did_you_mean(cmd.split(' ')[0].lower(), candidates)
        if suggestion is None:
            return "Command not found."
        return "Command not found. Did you mean: `%s`?" % suggestion
    bot.command = command_with_didyoumean
commands = []  # no standalone commands; this module only wraps bot.command in on_bot_load
|
{
"content_hash": "d9b0e72a3cc9b8ce1ffd6a3ebfc2fcf1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 116,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.5601374570446735,
"repo_name": "Mego/DataBot",
"id": "0803a15dec0c61794c383714c5f5adc2f04d317e",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SE-Chatbot/botbuiltins/didyoumean.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "322"
},
{
"name": "JavaScript",
"bytes": "19383"
},
{
"name": "Makefile",
"bytes": "891"
},
{
"name": "Python",
"bytes": "176137"
},
{
"name": "Shell",
"bytes": "1447"
}
],
"symlink_target": ""
}
|
import sys
import paramiko
from contextlib import closing
from scpclient import *
import re
import logging
class CX:
    """Thin wrapper around an SSH session to the CX1 HPC login node.

    Provides helpers to run remote commands, inspect the PBS queue
    (qstat) and pull result files back from compute nodes via scp.
    """

    def __init__(self, shell=None):
        # An existing paramiko.SSHClient may be injected (useful for tests).
        self.shell = shell
        return

    def login_connect(self, username, password):
        """ Connect to the CX1 login node. """
        logging.info("Connecting to login node...")
        try:
            self.shell = paramiko.SSHClient()
            self.shell.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self.shell.connect("login.cx1.hpc.ic.ac.uk", username=username, password=password)
        except paramiko.SSHException as e:
            # 'except X as e' (instead of the old 'except X, e') matches
            # login_disconnect below and works on Python 2.6+ and 3.
            logging.exception(str(e))
            return False
        self.temp_dir = "/tmp/tmp_%s" % (username)
        logging.info("Connected successfully.")
        return True

    def login_disconnect(self):
        """Close the SSH session. Returns True on success, False otherwise."""
        logging.info("Disconnecting from login node...")
        try:
            self.shell.close()
        except paramiko.SSHException as e:
            logging.error(str(e))
            return False
        except AttributeError as e:
            logging.error("Cannot disconnect - no connection exists!")
            return False
        logging.info("Disconnected successfully.")
        return True

    def send_command(self, command):
        """Run *command* remotely and return its stdout as a list of lines."""
        stdin, stdout, stderr = self.shell.exec_command(command)
        stdin.close()
        response = stdout.readlines()
        return response

    def get_jobs_list(self):
        """Return the non-empty job lines from 'qstat -a' (header stripped)."""
        response = self.send_command("qstat -a")
        # Ignore header lines
        response = response[5:]
        jobs = []
        number_of_jobs = len(response)
        for i in range(number_of_jobs):
            if(response[i] != ""):
                response[i] = response[i].replace("\n", "")
                jobs.append(response[i])
        return jobs

    def get_nodes_list(self, job_id):
        """Return the compute nodes assigned to *job_id* per 'qstat -an'."""
        response = self.send_command("qstat -an")
        for i in range(len(response)):
            if(job_id in response[i]):
                break
        # Handle the case where the nodes list is split over multiple lines.
        # NOTE(review): if job_id is absent, i points at the last line — the
        # parse below then yields [] or garbage; verify callers pass valid ids.
        s = ""
        for j in range(i+1, len(response)):
            if(not ".cx1b" in response[j]):
                s += response[j]
            else:
                break
        # Remove whitespace.
        s = s.replace("\n", "")
        s = s.replace("\r", "")
        s = s.replace(" ", "")
        # Extract the nodes (raw string avoids invalid-escape warnings).
        nodes = []
        pattern = re.compile(r"((cx1-\d+-\d+-\d+)/\d+\*\d+\+*)")
        for match in re.findall(pattern, s):
            nodes.append(match[1])
        return nodes

    def clean_temp(self):
        """Recreate the per-user temp directory on the login node."""
        logging.info("Cleaning temp directory (%s)" % self.temp_dir)
        self.send_command("rm -rf %s; mkdir %s" % (self.temp_dir, self.temp_dir))
        return

    def ls_on_node(self, job_id, node):
        """List the job's scratch files on *node* (via nested ssh)."""
        logging.info("Listing files on node %s..." % node)
        response = self.send_command("ssh %s \"ls /tmp/pbs.%s/\"" % (node, job_id))
        return response

    def get_data_from_node(self, job_id, node, pattern):
        """Copy files matching *pattern* from the node's scratch dir to temp_dir."""
        logging.info("Getting data from node %s..." % node)
        self.send_command("scp -r %s:/tmp/pbs.%s/*%s* %s" % (node, job_id, pattern, self.temp_dir))
        return

    def get_data_from_cx(self, pattern):
        """Download the contents of temp_dir to the local working directory."""
        logging.info("Downloading data from CX1...")
        with closing(ReadDir(self.shell.get_transport(), '%s' % self.temp_dir)) as scp:
            scp.receive_dir('.', preserve_times=True)
        return
|
{
"content_hash": "5d0d217feddf708ff2b8a424c02065d8",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 97,
"avg_line_length": 30.54054054054054,
"alnum_prop": 0.5752212389380531,
"repo_name": "ctjacobs/cxqwatch",
"id": "18aa28245c36a17070e4489ad1df5e4c79a18023",
"size": "4583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23326"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from janyson.options import Options
class OptionsTestCase(TestCase):
    """Exercise attribute-style get/set/delete semantics of Options."""

    def setUp(self):
        self.options = Options()

    def test_default_options(self):
        # Every declared default must be reachable as an attribute.
        for name, expected in Options.default_options.items():
            self.assertEqual(getattr(self.options, name), expected)

    def test_get_nonexistent(self):
        with self.assertRaisesRegexp(AttributeError, "Missing option"):
            self.options.key

    def test_set_and_get(self):
        self.options.key = 'value'
        self.assertEqual(self.options.key, 'value')

    def test_set_and_delete(self):
        self.options.key = 'value'
        del self.options.key
        # Once deleted, access falls back to the missing-option error.
        with self.assertRaisesRegexp(AttributeError, "Missing option"):
            self.options.key

    def test_delete_nonexistent(self):
        with self.assertRaisesRegexp(AttributeError, "Missing option"):
            del self.options.key
|
{
"content_hash": "4858d6f34b21dc9a21cc77e9e58ab57c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 71,
"avg_line_length": 30,
"alnum_prop": 0.6634408602150538,
"repo_name": "un-def/django-janyson",
"id": "ab4496833d523710d27e81b64c9994534854ad29",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests_options.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "40930"
},
{
"name": "Shell",
"bytes": "174"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from dlux.dashboards.network.neutron_ports import views
# Route table for the neutron_ports dashboard panel: an index listing plus a
# per-port detail page keyed by port_id (any non-slash characters).
urlpatterns = patterns(
    'dlux.dashboards.network.neutron_ports.views',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'(?P<port_id>[^/]+)/detail$',
        views.DetailView.as_view(), name='detail'),
)
|
{
"content_hash": "2ea925969146f0e3f3d2a15850706a85",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 56,
"avg_line_length": 33.63636363636363,
"alnum_prop": 0.6837837837837838,
"repo_name": "ekarlso/dlux-horizon",
"id": "0ace76c3520be73802cd4ee5dd3ade347f1065b2",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlux/dashboards/network/neutron_ports/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163296"
},
{
"name": "JavaScript",
"bytes": "10251"
},
{
"name": "Python",
"bytes": "120062"
},
{
"name": "Shell",
"bytes": "12382"
}
],
"symlink_target": ""
}
|
import hashlib
from charms.reactive import when, when_not
from charms.reactive import is_state, set_state, remove_state
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import get_package_version
from charms.layer.bigtop_zeppelin import Zeppelin
from charms.reactive.helpers import data_changed
@when('zeppelin.installed')
def update_status():
    """Set juju status from the readiness of related apps.

    An app that has joined its relation but is not yet ready puts the
    unit into 'waiting'; otherwise any ready apps are listed in an
    'active' status message, falling back to a plain 'ready'.
    """
    waiting_apps = []
    ready_apps = []
    for app, joined_flag, ready_flag in (
            ('hadoop', 'hadoop.joined', 'hadoop.ready'),
            ('hive', 'hive.connected', 'hive.available'),
            ('spark', 'spark.joined', 'spark.ready')):
        joined = is_state(joined_flag)
        ready = is_state(ready_flag)
        if joined and not ready:
            waiting_apps.append(app)
        elif ready:
            ready_apps.append(app)

    # 'waiting' takes priority over 'active' when any app is still pending.
    if waiting_apps:
        hookenv.status_set('waiting',
                           'waiting for: {}'.format(' & '.join(waiting_apps)))
    elif ready_apps:
        hookenv.status_set('active',
                           'ready with: {}'.format(' & '.join(ready_apps)))
    else:
        hookenv.status_set('active', 'ready')
@when('bigtop.available')
@when_not('zeppelin.installed')
def initial_setup():
    """One-time install of Zeppelin once the Bigtop base layer is available."""
    hookenv.status_set('maintenance', 'installing zeppelin')
    zeppelin = Zeppelin()
    zeppelin.install()
    zeppelin.setup_etc_env()
    zeppelin.open_ports()
    set_state('zeppelin.installed')
    update_status()
    # set app version string for juju status output
    zeppelin_version = get_package_version('zeppelin') or 'unknown'
    hookenv.application_version_set(zeppelin_version)
@when('zeppelin.installed')
@when('hadoop.ready')
@when_not('zeppelin.hadoop.configured')
def configure_hadoop(hadoop):
    """Wire Zeppelin up to Hadoop and register the Hadoop sample notebooks."""
    zeppelin = Zeppelin()
    zeppelin.configure_hadoop()
    zeppelin.register_hadoop_notebooks()
    set_state('zeppelin.hadoop.configured')
@when('zeppelin.installed')
@when_not('hadoop.ready')
@when('zeppelin.hadoop.configured')
def unconfigure_hadoop():
    """Remove the Hadoop sample notebooks when the hadoop relation departs."""
    zeppelin = Zeppelin()
    zeppelin.remove_hadoop_notebooks()
    remove_state('zeppelin.hadoop.configured')
@when('zeppelin.installed', 'hive.ready')
def configure_hive(hive):
    """Point Zeppelin's Hive interpreter at the related Hive server.

    Only reconfigures when the JDBC URL actually changed, to avoid
    needless restarts.
    """
    jdbc_url = 'jdbc:hive2://{}:{}'.format(hive.get_private_ip(),
                                           hive.get_port())
    if data_changed('hive.connect', jdbc_url):
        hookenv.status_set('maintenance', 'configuring hive')
        Zeppelin().configure_hive(jdbc_url)
        set_state('zeppelin.hive.configured')
        update_status()
@when('zeppelin.installed', 'zeppelin.hive.configured')
@when_not('hive.ready')
def unconfigure_hive():
    """Reset the Hive JDBC URL when the hive relation goes away."""
    hookenv.status_set('maintenance', 'removing hive relation')
    zeppelin = Zeppelin()
    # Empty host/port clears the previous connection string.
    zeppelin.configure_hive('jdbc:hive2://:')
    remove_state('zeppelin.hive.configured')
    update_status()
@when('zeppelin.installed', 'spark.ready')
def configure_spark(spark):
    """Re-point Zeppelin at the Spark master whenever it changes."""
    master_url = spark.get_master_url()
    if not data_changed('spark.master', master_url):
        return
    hookenv.status_set('maintenance', 'configuring spark')
    Zeppelin().configure_spark(master_url)
    set_state('zeppelin.spark.configured')
    update_status()
@when('zeppelin.installed', 'zeppelin.spark.configured')
@when_not('spark.ready')
def unconfigure_spark():
    """Fall back to the default Spark master when the relation departs."""
    hookenv.status_set('maintenance', 'removing spark relation')
    zeppelin = Zeppelin()
    # Yarn / Hadoop may not actually be available, but that is the default
    # value and nothing else would reasonably work here either without Spark.
    zeppelin.configure_spark('yarn-client')
    data_changed('spark.master', 'yarn-client')  # ensure updated if re-added
    remove_state('zeppelin.spark.configured')
    update_status()
@when('zeppelin.started', 'client.notebook.registered')
def register_notebook(client):
    """Register notebooks offered over the client relation.

    Each notebook is keyed by the MD5 of its content; the client is told
    whether registration was accepted or rejected.
    """
    zeppelin = Zeppelin()
    for notebook in client.unregistered_notebooks():
        digest = hashlib.md5(notebook.encode('utf8')).hexdigest()
        accepted = zeppelin.register_notebook(digest, notebook)
        if accepted:
            client.accept_notebook(notebook)
        else:
            client.reject_notebook(notebook)
@when('zeppelin.started', 'client.notebook.removed')
def remove_notebook(client):
    """Deregister notebooks the client relation asked to remove."""
    zeppelin = Zeppelin()
    for notebook in client.unremoved_notebooks():
        # Notebooks are keyed by the MD5 of their content (see register_notebook).
        notebook_md5 = hashlib.md5(notebook.encode('utf8')).hexdigest()
        zeppelin.remove_notebook(notebook_md5)
        client.remove_notebook(notebook)
|
{
"content_hash": "e5dfce132b09670b58f30e187182a109",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 78,
"avg_line_length": 33.16107382550336,
"alnum_prop": 0.6798218984011334,
"repo_name": "evans-ye/bigtop",
"id": "6d37fdccc6fb9db5d6df6fdcb5b236876347545b",
"size": "5723",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigtop-packages/src/charm/zeppelin/layer-zeppelin/reactive/zeppelin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4822"
},
{
"name": "Groovy",
"bytes": "585902"
},
{
"name": "Java",
"bytes": "650834"
},
{
"name": "Makefile",
"bytes": "63715"
},
{
"name": "PigLatin",
"bytes": "15615"
},
{
"name": "Puppet",
"bytes": "173082"
},
{
"name": "Python",
"bytes": "202005"
},
{
"name": "Roff",
"bytes": "49282"
},
{
"name": "Ruby",
"bytes": "17453"
},
{
"name": "Scala",
"bytes": "85334"
},
{
"name": "Shell",
"bytes": "637222"
},
{
"name": "XSLT",
"bytes": "1323"
}
],
"symlink_target": ""
}
|
# Read sentences from the user until 'END', write them to mywords.out,
# then echo the file back in upper case. (Python 2: raw_input / print stmt.)
sentences = list()
while True:
    txt = raw_input("Skriv en mening: ")  # Swedish prompt: "Write a sentence"
    if txt == 'END':
        break
    sentences.append(txt)
outFile = open('mywords.out', 'w')
for sentence in sentences:
    outFile.write(sentence)
    outFile.write('\n')  # one sentence per line
outFile.close()
# empty line
print ""
fh = open("mywords.out")
for line in fh:
    print line.rstrip().upper()
|
{
"content_hash": "e25d1835a92d0c7863a284b3acd082f4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 40,
"avg_line_length": 17.714285714285715,
"alnum_prop": 0.6102150537634409,
"repo_name": "CoderDojo-Karlskrona/python-exercises",
"id": "68cd414ed68bbb89ac3be4e0defb00d5bf198b20",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex13/filewriter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5690"
}
],
"symlink_target": ""
}
|
from model.contact import Contact
import random
def test_edit_contact_by_index(app, db, check_ui):
    """Edit a randomly chosen contact through the UI and verify via the DB.

    Creates a placeholder contact first if the address book is empty.
    With check_ui, also cross-checks the DB list against the UI list.
    """
    if app.contact.count_contacts() == 0:
        # The date/month values are option-locator XPaths consumed by the
        # select helpers, not literal dates.
        app.contact.create(Contact(firstname="For modify", birth_date="//div[@id='content']/form/select[1]//option[1]",
                                   birth_month="//div[@id='content']/form/select[2]//option[1]",
                                   anniversary_date="//div[@id='content']/form/select[3]//option[1]",
                                   anniversary_month="//div[@id='content']/form/select[4]//option[1]"))
    old_contacts = db.get_contact_list()
    contact = random.choice(old_contacts)
    input_contact = Contact(firstname="Отредактирован", middlename="Отредактирович",
                            lastname="Отредактированский", nickname="Редактор",
                            companyname='ОАО "Редакция и Мир"', address="редакторский городок",
                            homenumber="567-22-04", worknumber="45+6", email="glavred@mir.ur",
                            notes="Здесь могла бы быть ваша реклама", email2="",
                            birth_date="//div[@id='content']/form/select[1]//option[4]",
                            birth_month="//div[@id='content']/form/select[2]//option[5]", birth_year="",
                            anniversary_date="//div[@id='content']/form/select[3]//option[6]",
                            anniversary_month="//div[@id='content']/form/select[4]//option[7]",
                            mobilenumber="12345678", secondarynumber="(098)76543")
    # Keep the DB id so the edited record can be matched positionally below.
    input_contact.id = contact.id
    app.contact.edit_contact_by_id(contact.id, input_contact)
    # Test validation
    new_contacts = db.get_contact_list()
    assert len(old_contacts) == len(new_contacts)
    idx = int(old_contacts.index(contact))
    old_contacts[idx] = input_contact
    assert old_contacts == new_contacts
    if check_ui:
        new_contacts = map(app.contact.clean, db.get_contact_list())
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
{
"content_hash": "2b45b287c076934233546ab55776551f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 123,
"avg_line_length": 66.27272727272727,
"alnum_prop": 0.5518975765889346,
"repo_name": "AklerQ/python_training",
"id": "f15645e2718a43f06b2355f1fd95c770c6af6473",
"size": "2326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_edit_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47703"
}
],
"symlink_target": ""
}
|
import collections
import json
from telemetry.core import util
from telemetry.page import page_benchmark
class SunSpiderBenchark(page_benchmark.PageBenchmark):
    # NOTE(review): class name looks like a typo for "SunSpiderBenchmark";
    # left unchanged since the benchmark runner may reference it by name.

    def MeasurePage(self, _, tab, results):
        """Wait for a SunSpider run to finish and record per-test timings.

        Polls until the page navigates to the sunspider-results URL, then
        parses the page's JSON 'output' variable: a list of dicts mapping
        test name -> time in ms, one dict per run.
        """
        js_is_done = """
        window.location.pathname.indexOf('sunspider-results') >= 0"""
        def _IsDone():
            return tab.EvaluateJavaScript(js_is_done)
        util.WaitFor(_IsDone, 300, poll_interval=5)

        js_get_results = 'JSON.stringify(output);'
        js_results = json.loads(tab.EvaluateJavaScript(js_get_results))
        r = collections.defaultdict(list)
        totals = []
        # js_results is: [{'foo': v1, 'bar': v2},
        #                 {'foo': v3, 'bar': v4},
        #                 ...]
        for result in js_results:
            total = 0
            for key, value in result.iteritems():  # Python 2 dict iteration
                r[key].append(value)
                total += value
            totals.append(total)
        # One chart per test name, plus an aggregate per-run total.
        for key, values in r.iteritems():
            results.Add('t', 'ms', values, chart_name=key, data_type='unimportant')
        results.Add('t', 'ms', totals, chart_name='total')
|
{
"content_hash": "f9b97b9b92dfb97a99d05ffd6c94d701",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 33.29032258064516,
"alnum_prop": 0.625968992248062,
"repo_name": "zcbenz/cefode-chromium",
"id": "f976216de30df28cf93d3990d94691563fbc9796",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/perf_tools/sunspider.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1174304"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "76026099"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "157904700"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3225038"
},
{
"name": "JavaScript",
"bytes": "18180217"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "7139426"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932901"
},
{
"name": "Python",
"bytes": "8654916"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1533012"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
}
|
import json
from functools import wraps
from flask import Blueprint, current_app, jsonify, request
from app.celery.tasks import (
record_daily_sorted_counts,
update_letter_notifications_statuses,
)
from app.config import QueueNames
from app.notifications.utils import autoconfirm_subscription
from app.schema_validation import validate
from app.v2.errors import register_errors
# Blueprint for the DVLA letter callback endpoint; error handlers are
# registered so validation failures return the API's standard error shape.
letter_callback_blueprint = Blueprint("notifications_letter_callback", __name__)
register_errors(letter_callback_blueprint)

# JSON Schema for the SNS envelope DVLA sends on an S3 update. Only the
# envelope fields the handler relies on (Type, MessageId, Message) are
# constrained; Message may arrive as a JSON string or an object.
dvla_sns_callback_schema = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "description": "sns callback received on s3 update",
    "type": "object",
    "title": "dvla internal sns callback",
    "properties": {
        "Type": {"enum": ["Notification", "SubscriptionConfirmation"]},
        "MessageId": {"type": "string"},
        "Message": {"type": ["string", "object"]},
    },
    "required": ["Type", "MessageId", "Message"],
}
def validate_schema(schema):
    """Decorator factory: validate the request's JSON body against *schema*.

    Validation errors raised by validate() propagate to the blueprint's
    registered error handlers before the view runs.
    """
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            validate(request.get_json(force=True), schema)
            return func(*args, **kwargs)
        return inner
    return decorator
@letter_callback_blueprint.route("/notifications/letter/dvla", methods=["POST"])
@validate_schema(dvla_sns_callback_schema)
def process_letter_response():
    """Handle the DVLA SNS callback fired when a file lands in S3.

    SubscriptionConfirmation messages are auto-confirmed; for notification
    events, response files (*rs.txt / *rsp.txt) trigger async tasks that
    update letter notification statuses and record daily sorted counts.
    Always returns 200 so SNS does not retry.
    """
    req_json = request.get_json(force=True)
    current_app.logger.debug("Received SNS callback: {}".format(req_json))
    if not autoconfirm_subscription(req_json):
        # The callback should have one record for an S3 Put Event.
        message = json.loads(req_json["Message"])
        filename = message["Records"][0]["s3"]["object"]["key"]
        current_app.logger.info("Received file from DVLA: {}".format(filename))

        # str.endswith accepts a tuple, so both response-file suffixes are
        # tested in one call (and filename.lower() is computed only once).
        if filename.lower().endswith(("rs.txt", "rsp.txt")):
            current_app.logger.info("DVLA callback: Calling task to update letter notifications")
            update_letter_notifications_statuses.apply_async([filename], queue=QueueNames.NOTIFY)
            record_daily_sorted_counts.apply_async([filename], queue=QueueNames.NOTIFY)

    return jsonify(result="success", message="DVLA callback succeeded"), 200
|
{
"content_hash": "176c4ad6e35c7910eac5fb952d138c0c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 97,
"avg_line_length": 36.81967213114754,
"alnum_prop": 0.6821015138023152,
"repo_name": "alphagov/notifications-api",
"id": "121dcb3e194259bd7abe253e3c3048ebbed38d3f",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "app/notifications/notifications_letter_callback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
}
|
from sqlalchemy.test.testing import assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy import Integer, PickleType, String
import operator
from sqlalchemy.test import testing
from sqlalchemy.util import OrderedSet
from sqlalchemy.orm import mapper, relationship, create_session, PropComparator, \
synonym, comparable_property, sessionmaker, attributes
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.test.testing import eq_, ne_
from test.orm import _base, _fixtures
from sqlalchemy.test.schema import Table, Column
class MergeTest(_fixtures.FixtureTest):
"""Session.merge() functionality"""
run_inserts = None
def on_load_tracker(self, cls, canary=None):
    """Attach an 'on_load' listener to *cls* that counts invocations.

    Returns the canary callable; its .called attribute records how many
    instances of cls were populated by a load (including merge loads).
    Pass an existing canary to share one counter across several classes.
    """
    if canary is None:
        def canary(instance):
            canary.called += 1
        canary.called = 0
    manager = sa.orm.attributes.manager_of_class(cls)
    manager.events.add_listener('on_load', canary)
    return canary
@testing.resolve_artifact_names
def test_transient_to_pending(self):
    """Merging a transient object yields a distinct pending copy in the session."""
    mapper(User, users)
    sess = create_session()
    on_load = self.on_load_tracker(User)
    u = User(id=7, name='fred')
    eq_(on_load.called, 0)
    u2 = sess.merge(u)
    # merge triggers exactly one load event for the merged copy
    eq_(on_load.called, 1)
    assert u2 in sess
    eq_(u2, User(id=7, name='fred'))
    sess.flush()
    sess.expunge_all()
    eq_(sess.query(User).first(), User(id=7, name='fred'))

@testing.resolve_artifact_names
def test_transient_to_pending_no_pk(self):
    """test that a transient object with no PK attribute doesn't trigger a needless load."""
    mapper(User, users)
    sess = create_session()
    u = User(name='fred')
    def go():
        sess.merge(u)
    # zero SQL statements expected during the merge
    self.assert_sql_count(testing.db, go, 0)

@testing.resolve_artifact_names
def test_transient_to_pending_collection(self):
    """Merge cascades across a collection attribute on a transient parent."""
    mapper(User, users, properties={
        'addresses': relationship(Address, backref='user',
                                  collection_class=OrderedSet)})
    mapper(Address, addresses)
    on_load = self.on_load_tracker(User)
    self.on_load_tracker(Address, on_load)

    u = User(id=7, name='fred', addresses=OrderedSet([
        Address(id=1, email_address='fred1'),
        Address(id=2, email_address='fred2'),
    ]))
    eq_(on_load.called, 0)

    sess = create_session()
    sess.merge(u)
    # one User + two Address copies loaded
    eq_(on_load.called, 3)

    merged_users = [e for e in sess if isinstance(e, User)]
    eq_(len(merged_users), 1)
    # the session holds a copy, never the original transient instance
    assert merged_users[0] is not u

    sess.flush()
    sess.expunge_all()

    eq_(sess.query(User).one(),
        User(id=7, name='fred', addresses=OrderedSet([
            Address(id=1, email_address='fred1'),
            Address(id=2, email_address='fred2'),
        ]))
    )
@testing.resolve_artifact_names
def test_transient_to_persistent(self):
    """Merging onto an existing primary key loads and updates the persistent row."""
    mapper(User, users)
    on_load = self.on_load_tracker(User)
    sess = create_session()
    u = User(id=7, name='fred')
    sess.add(u)
    sess.flush()
    sess.expunge_all()
    eq_(on_load.called, 0)

    _u2 = u2 = User(id=7, name='fred jones')
    eq_(on_load.called, 0)
    u2 = sess.merge(u2)
    # merge returns the session's copy, not the instance passed in
    assert u2 is not _u2
    eq_(on_load.called, 1)
    sess.flush()
    sess.expunge_all()
    eq_(sess.query(User).first(), User(id=7, name='fred jones'))
    eq_(on_load.called, 2)

@testing.resolve_artifact_names
def test_transient_to_persistent_collection(self):
    """Merge with delete-orphan cascade replaces the persisted collection."""
    mapper(User, users, properties={
        'addresses': relationship(Address,
                                  backref='user',
                                  collection_class=OrderedSet,
                                  order_by=addresses.c.id,
                                  cascade="all, delete-orphan")
    })
    mapper(Address, addresses)
    on_load = self.on_load_tracker(User)
    self.on_load_tracker(Address, on_load)

    u = User(id=7, name='fred', addresses=OrderedSet([
        Address(id=1, email_address='fred1'),
        Address(id=2, email_address='fred2'),
    ]))
    sess = create_session()
    sess.add(u)
    sess.flush()
    sess.expunge_all()
    eq_(on_load.called, 0)

    u = User(id=7, name='fred', addresses=OrderedSet([
        Address(id=3, email_address='fred3'),
        Address(id=4, email_address='fred4'),
    ]))

    u = sess.merge(u)

    # 1. merges User object.  updates into session.
    # 2.,3. merges Address ids 3 & 4, saves into session.
    # 4.,5. loads pre-existing elements in "addresses" collection,
    # marks as deleted, Address ids 1 and 2.
    eq_(on_load.called, 5)

    eq_(u,
        User(id=7, name='fred', addresses=OrderedSet([
            Address(id=3, email_address='fred3'),
            Address(id=4, email_address='fred4'),
        ]))
    )
    sess.flush()
    sess.expunge_all()
    eq_(sess.query(User).one(),
        User(id=7, name='fred', addresses=OrderedSet([
            Address(id=3, email_address='fred3'),
            Address(id=4, email_address='fred4'),
        ]))
    )
@testing.resolve_artifact_names
def test_detached_to_persistent_collection(self):
    """Collection changes made while detached are applied on merge."""
    mapper(User, users, properties={
        'addresses': relationship(Address,
                                  backref='user',
                                  order_by=addresses.c.id,
                                  collection_class=OrderedSet)})
    mapper(Address, addresses)
    on_load = self.on_load_tracker(User)
    self.on_load_tracker(Address, on_load)

    a = Address(id=1, email_address='fred1')
    u = User(id=7, name='fred', addresses=OrderedSet([
        a,
        Address(id=2, email_address='fred2'),
    ]))
    sess = create_session()
    sess.add(u)
    sess.flush()
    sess.expunge_all()

    # mutate the now-detached instance: rename, add id=3, remove id=1
    u.name = 'fred jones'
    u.addresses.add(Address(id=3, email_address='fred3'))
    u.addresses.remove(a)

    eq_(on_load.called, 0)
    u = sess.merge(u)
    eq_(on_load.called, 4)
    sess.flush()
    sess.expunge_all()

    eq_(sess.query(User).first(),
        User(id=7, name='fred jones', addresses=OrderedSet([
            Address(id=2, email_address='fred2'),
            Address(id=3, email_address='fred3')])))

@testing.resolve_artifact_names
def test_unsaved_cascade(self):
    """Merge of a transient entity with two child transient entities, with a bidirectional relationship."""
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses),
                                  cascade="all", backref="user")
    })
    on_load = self.on_load_tracker(User)
    self.on_load_tracker(Address, on_load)
    sess = create_session()

    u = User(id=7, name='fred')
    a1 = Address(email_address='foo@bar.com')
    a2 = Address(email_address='hoho@bar.com')
    u.addresses.append(a1)
    u.addresses.append(a2)

    u2 = sess.merge(u)
    eq_(on_load.called, 3)

    # both the original and the merged copy reflect the same state
    eq_(u,
        User(id=7, name='fred', addresses=[
            Address(email_address='foo@bar.com'),
            Address(email_address='hoho@bar.com')]))
    eq_(u2,
        User(id=7, name='fred', addresses=[
            Address(email_address='foo@bar.com'),
            Address(email_address='hoho@bar.com')]))

    sess.flush()
    sess.expunge_all()
    u2 = sess.query(User).get(7)

    eq_(u2, User(id=7, name='fred', addresses=[
        Address(email_address='foo@bar.com'),
        Address(email_address='hoho@bar.com')]))
    eq_(on_load.called, 6)
@testing.resolve_artifact_names
def test_merge_empty_attributes(self):
    """Behavior of merge when the incoming instance omits attribute values."""
    # NOTE(review): User is mapped to the 'dingalings' table here (which
    # supplies the 'data' column) — presumably intentional; verify fixture.
    mapper(User, dingalings)
    sess = create_session()

    # merge empty stuff.  goes in as NULL.
    # not sure what this was originally trying to
    # test.
    u1 = sess.merge(User(id=1))
    sess.flush()
    assert u1.data is None

    # save another user with "data"
    u2 = User(id=2, data="foo")
    sess.add(u2)
    sess.flush()

    # merge User on u2's pk with
    # no "data".
    # value isn't whacked from the destination
    # dict.
    u3 = sess.merge(User(id=2))
    eq_(u3.__dict__['data'], "foo")

    # make a change.
    u3.data = 'bar'

    # merge another no-"data" user.
    # attribute maintains modified state.
    # (usually autoflush would have happened
    # here anyway).
    u4 = sess.merge(User(id=2))
    eq_(u3.__dict__['data'], "bar")

    sess.flush()
    # and after the flush.
    eq_(u3.data, "bar")

    # new row.
    u5 = User(id=3, data="foo")
    sess.add(u5)
    sess.flush()

    # blow it away from u5, but don't
    # mark as expired.  so it would just
    # be blank.
    del u5.data

    # the merge adds expiry to the
    # attribute so that it loads.
    # not sure if I like this - it currently is needed
    # for test_pickled:PickleTest.test_instance_deferred_cols
    u6 = sess.merge(User(id=3))
    assert 'data' not in u6.__dict__
    assert u6.data == "foo"

    # set it to None.  this is actually
    # a change so gets preserved.
    u6.data = None
    u7 = sess.merge(User(id=3))
    assert u6.__dict__['data'] is None

@testing.resolve_artifact_names
def test_merge_irregular_collection(self):
    """Merge works with a dict-style (attribute-mapped) collection."""
    mapper(User, users, properties={
        'addresses': relationship(
            mapper(Address, addresses),
            backref='user',
            collection_class=attribute_mapped_collection('email_address')),
    })
    u1 = User(id=7, name='fred')
    u1.addresses['foo@bar.com'] = Address(email_address='foo@bar.com')
    sess = create_session()
    sess.merge(u1)
    sess.flush()
    assert u1.addresses.keys() == ['foo@bar.com']
@testing.resolve_artifact_names
def test_attribute_cascade(self):
    """Merge of a persistent entity with two child persistent entities."""
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses), backref='user')
    })
    on_load = self.on_load_tracker(User)
    self.on_load_tracker(Address, on_load)

    sess = create_session()

    # set up data and save
    u = User(id=7, name='fred', addresses=[
        Address(email_address='foo@bar.com'),
        Address(email_address='hoho@la.com')])
    sess.add(u)
    sess.flush()

    # assert data was saved
    sess2 = create_session()
    u2 = sess2.query(User).get(7)
    eq_(u2,
        User(id=7, name='fred', addresses=[
            Address(email_address='foo@bar.com'),
            Address(email_address='hoho@la.com')]))

    # make local changes to data
    u.name = 'fred2'
    u.addresses[1].email_address = 'hoho@lalala.com'
    eq_(on_load.called, 3)

    # new session, merge modified data into session
    sess3 = create_session()
    u3 = sess3.merge(u)
    eq_(on_load.called, 6)

    # ensure local changes are pending
    eq_(u3, User(id=7, name='fred2', addresses=[
        Address(email_address='foo@bar.com'),
        Address(email_address='hoho@lalala.com')]))

    # save merged data
    sess3.flush()

    # assert modified/merged data was saved
    sess.expunge_all()
    u = sess.query(User).get(7)
    eq_(u, User(id=7, name='fred2', addresses=[
        Address(email_address='foo@bar.com'),
        Address(email_address='hoho@lalala.com')]))
    eq_(on_load.called, 9)

    # merge persistent object into another session
    sess4 = create_session()
    u = sess4.merge(u)
    assert len(u.addresses)
    for a in u.addresses:
        assert a.user is u
    def go():
        sess4.flush()
    # no changes; therefore flush should do nothing
    self.assert_sql_count(testing.db, go, 0)
    eq_(on_load.called, 12)

    # test with "dontload" merge
    sess5 = create_session()
    u = sess5.merge(u, load=False)
    assert len(u.addresses)
    for a in u.addresses:
        assert a.user is u
    def go():
        sess5.flush()
    # no changes; therefore flush should do nothing
    # but also, load=False wipes out any difference in committed state,
    # so no flush at all
    self.assert_sql_count(testing.db, go, 0)
    eq_(on_load.called, 15)

    sess4 = create_session()
    u = sess4.merge(u, load=False)
    # post merge change
    u.addresses[1].email_address = 'afafds'
    def go():
        sess4.flush()
    # afafds change flushes
    self.assert_sql_count(testing.db, go, 1)
    eq_(on_load.called, 18)

    sess5 = create_session()
    u2 = sess5.query(User).get(u.id)
    eq_(u2.name, 'fred2')
    eq_(u2.addresses[1].email_address, 'afafds')
    eq_(on_load.called, 21)
@testing.resolve_artifact_names
def test_no_relationship_cascade(self):
    """test that merge doesn't interfere with a relationship()
    target that specifically doesn't include 'merge' cascade.
    """
    mapper(Address, addresses, properties={
        'user': relationship(User, cascade="save-update")
    })
    mapper(User, users)
    sess = create_session()
    u1 = User(name="fred")
    a1 = Address(email_address="asdf", user=u1)
    sess.add(a1)
    sess.flush()

    a2 = Address(id=a1.id, email_address="bar", user=User(name="hoho"))
    a2 = sess.merge(a2)
    sess.flush()

    # no expire of the attribute

    assert a2.__dict__['user'] is u1

    # merge succeeded
    eq_(
        sess.query(Address).all(),
        [Address(id=a1.id, email_address="bar")]
    )

    # didn't touch user
    eq_(
        sess.query(User).all(),
        [User(name="fred")]
    )

@testing.resolve_artifact_names
def test_one_to_many_cascade(self):
    """Changes to a merged parent cascade to its one-to-many children."""
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses))})

    on_load = self.on_load_tracker(User)
    self.on_load_tracker(Address, on_load)

    sess = create_session()
    u = User(name='fred')
    a1 = Address(email_address='foo@bar')
    a2 = Address(email_address='foo@quux')
    u.addresses.extend([a1, a2])

    sess.add(u)
    sess.flush()

    eq_(on_load.called, 0)

    sess2 = create_session()
    u2 = sess2.query(User).get(u.id)
    eq_(on_load.called, 1)

    # mutate in sess, merge into sess2; the change shows up on u2
    u.addresses[1].email_address = 'addr 2 modified'
    sess2.merge(u)
    eq_(u2.addresses[1].email_address, 'addr 2 modified')
    eq_(on_load.called, 3)

    sess3 = create_session()
    u3 = sess3.query(User).get(u.id)
    eq_(on_load.called, 4)

    u.name = 'also fred'
    sess3.merge(u)
    eq_(on_load.called, 6)
    eq_(u3.name, 'also fred')
@testing.resolve_artifact_names
def test_many_to_one_cascade(self):
    """Merge records pending history on a many-to-one; load=False keeps committed state."""
    mapper(Address, addresses, properties={
        'user': relationship(User)
    })
    mapper(User, users)

    u1 = User(id=1, name="u1")
    a1 = Address(id=1, email_address="a1", user=u1)
    u2 = User(id=2, name="u2")

    sess = create_session()
    sess.add_all([a1, u2])
    sess.flush()

    a1.user = u2

    sess2 = create_session()
    a2 = sess2.merge(a1)
    # pending change: u2 added; old value not loaded (PASSIVE_NO_RESULT)
    eq_(
        attributes.get_history(a2, 'user'),
        ([u2], (), [attributes.PASSIVE_NO_RESULT])
    )
    assert a2 in sess2.dirty

    sess.refresh(a1)

    sess2 = create_session()
    a2 = sess2.merge(a1, load=False)
    # load=False adopts the committed state unchanged — nothing is dirty
    eq_(
        attributes.get_history(a2, 'user'),
        ((), [u1], ())
    )
    assert a2 not in sess2.dirty

@testing.resolve_artifact_names
def test_many_to_many_cascade(self):
    """Changes to a merged parent cascade across a secondary-table collection."""
    mapper(Order, orders, properties={
        'items': relationship(mapper(Item, items), secondary=order_items)})

    on_load = self.on_load_tracker(Order)
    self.on_load_tracker(Item, on_load)

    sess = create_session()

    i1 = Item()
    i1.description = 'item 1'

    i2 = Item()
    i2.description = 'item 2'

    o = Order()
    o.description = 'order description'
    o.items.append(i1)
    o.items.append(i2)

    sess.add(o)
    sess.flush()

    eq_(on_load.called, 0)

    sess2 = create_session()
    o2 = sess2.query(Order).get(o.id)
    eq_(on_load.called, 1)

    o.items[1].description = 'item 2 modified'
    sess2.merge(o)
    eq_(o2.items[1].description, 'item 2 modified')
    eq_(on_load.called, 3)

    sess3 = create_session()
    o3 = sess3.query(Order).get(o.id)
    eq_(on_load.called, 4)

    o.description = 'desc modified'
    sess3.merge(o)
    eq_(on_load.called, 6)
    eq_(o3.description, 'desc modified')
@testing.resolve_artifact_names
def test_one_to_one_cascade(self):
    # merge() cascades across a uselist=False (scalar) relationship;
    # merging back into the originating session reuses the existing
    # identity and triggers no additional loads.
    mapper(User, users, properties={
        'address': relationship(mapper(Address, addresses), uselist=False)
    })
    on_load = self.on_load_tracker(User)
    self.on_load_tracker(Address, on_load)

    sess = create_session()
    u = User()
    u.id = 7
    u.name = "fred"
    a1 = Address()
    a1.email_address = 'foo@bar.com'
    u.address = a1

    sess.add(u)
    sess.flush()

    eq_(on_load.called, 0)

    sess2 = create_session()
    u2 = sess2.query(User).get(7)
    eq_(on_load.called, 1)

    u2.name = 'fred2'
    u2.address.email_address = 'hoho@lalala.com'
    eq_(on_load.called, 2)

    # merge into the session that already holds u: no new loads occur
    u3 = sess.merge(u2)
    eq_(on_load.called, 2)
    assert u3 is u
@testing.resolve_artifact_names
def test_value_to_none(self):
    # merging an instance whose attributes are explicitly None overwrites
    # the persisted values -- for scalars and relationships alike, in
    # both directions of the backref.
    mapper(User, users, properties={
        'address': relationship(mapper(Address, addresses), uselist=False, backref='user')
    })
    sess = sessionmaker()()
    u = User(id=7, name="fred", address=Address(id=1, email_address='foo@bar.com'))
    sess.add(u)
    sess.commit()
    sess.close()

    u2 = User(id=7, name=None, address=None)
    u3 = sess.merge(u2)
    assert u3.name is None
    assert u3.address is None

    sess.close()
    a1 = Address(id=1, user=None)
    a2 = sess.merge(a1)
    assert a2.user is None
@testing.resolve_artifact_names
def test_transient_no_load(self):
    # load=False cannot be used with a transient (never-persisted) object
    mapper(User, users)

    sess = create_session()
    u = User()
    assert_raises_message(sa.exc.InvalidRequestError,
                          "load=False option does not support",
                          sess.merge, u, load=False)
@testing.resolve_artifact_names
def test_dont_load_deprecated(self):
    # the legacy dont_load=True flag still works (emitting a deprecation
    # warning) and behaves like load=False: no SELECT is issued, so the
    # stale in-memory name 'ed' survives the out-of-band UPDATE to 'jack'
    mapper(User, users)

    sess = create_session()
    u = User(name='ed')
    sess.add(u)
    sess.flush()
    u = sess.query(User).first()
    sess.expunge(u)
    sess.execute(users.update().values(name='jack'))

    @testing.uses_deprecated("dont_load=True has been renamed")
    def go():
        u1 = sess.merge(u, dont_load=True)
        assert u1 in sess
        assert u1.name == 'ed'
        assert u1 not in sess.dirty
    go()
@testing.resolve_artifact_names
def test_no_load_with_backrefs(self):
    """load=False populates relationships in both directions without requiring a load"""
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses), backref='user')
    })
    u = User(id=7, name='fred', addresses=[
        Address(email_address='ad1'),
        Address(email_address='ad2')])
    sess = create_session()
    sess.add(u)
    sess.flush()
    sess.close()
    assert 'user' in u.addresses[1].__dict__

    sess = create_session()
    u2 = sess.merge(u, load=False)
    assert 'user' in u2.addresses[1].__dict__
    eq_(u2.addresses[1].user, User(id=7, name='fred'))

    # expiring the backref removes it from __dict__; a subsequent
    # load=False merge does not resurrect it, but lazy access still works
    sess.expire(u2.addresses[1], ['user'])
    assert 'user' not in u2.addresses[1].__dict__

    sess.close()
    sess = create_session()
    u = sess.merge(u2, load=False)
    assert 'user' not in u.addresses[1].__dict__
    eq_(u.addresses[1].user, User(id=7, name='fred'))
@testing.resolve_artifact_names
def test_dontload_with_eager(self):
    """
    This test illustrates that with load=False, we can't just copy the
    committed_state of the merged instance over; since it references
    collection objects which themselves are to be merged. This
    committed_state would instead need to be piecemeal 'converted' to
    represent the correct objects. However, at the moment I'd rather not
    support this use case; if you are merging with load=False, you're
    typically dealing with caching and the merged objects shouldnt be
    'dirty'.
    """
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses))
    })
    sess = create_session()
    u = User()
    u.id = 7
    u.name = "fred"
    a1 = Address()
    a1.email_address = 'foo@bar.com'
    u.addresses.append(a1)
    sess.add(u)
    sess.flush()

    # eager-load the collection, then merge the loaded graph elsewhere
    sess2 = create_session()
    u2 = sess2.query(User).options(sa.orm.joinedload('addresses')).get(7)

    sess3 = create_session()
    u3 = sess3.merge(u2, load=False)

    # the merged graph must flush cleanly with zero SQL statements
    def go():
        sess3.flush()
    self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_no_load_disallows_dirty(self):
    """load=False doesnt support 'dirty' objects right now
    (see test_no_load_with_eager()). Therefore lets assert it.
    """
    mapper(User, users)
    sess = create_session()
    u = User()
    u.id = 7
    u.name = "fred"
    sess.add(u)
    sess.flush()

    # unflushed change -> the instance is 'dirty' and must be rejected
    u.name = 'ed'
    sess2 = create_session()
    try:
        sess2.merge(u, load=False)
        assert False
    except sa.exc.InvalidRequestError, e:
        assert ("merge() with load=False option does not support "
                "objects marked as 'dirty'. flush() all changes on mapped "
                "instances before merging with load=False.") in str(e)

    # a clean persistent instance merges fine and flushes with zero SQL
    u2 = sess2.query(User).get(7)

    sess3 = create_session()
    u3 = sess3.merge(u2, load=False)
    assert not sess3.dirty

    def go():
        sess3.flush()
    self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_no_load_sets_backrefs(self):
    # load=False still wires up the backref on merged children, and does
    # so without emitting any SQL
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses), backref='user')})

    sess = create_session()
    u = User()
    u.id = 7
    u.name = "fred"
    a1 = Address()
    a1.email_address = 'foo@bar.com'
    u.addresses.append(a1)
    sess.add(u)
    sess.flush()

    assert u.addresses[0].user is u

    sess2 = create_session()
    u2 = sess2.merge(u, load=False)
    assert not sess2.dirty

    def go():
        assert u2.addresses[0].user is u2
    self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_no_load_preserves_parents(self):
    """Merge with load=False does not trigger a 'delete-orphan' operation.

    merge with load=False sets attributes without using events. this means
    the 'hasparent' flag is not propagated to the newly merged instance.
    in fact this works out OK, because the '_state.parents' collection on
    the newly merged instance is empty; since the mapper doesn't see an
    active 'False' setting in this collection when _is_orphan() is called,
    it does not count as an orphan (i.e. this is the 'optimistic' logic in
    mapper._is_orphan().)
    """
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses),
                                  backref='user', cascade="all, delete-orphan")})
    sess = create_session()
    u = User()
    u.id = 7
    u.name = "fred"
    a1 = Address()
    a1.email_address = 'foo@bar.com'
    u.addresses.append(a1)
    sess.add(u)
    sess.flush()

    assert u.addresses[0].user is u

    sess2 = create_session()
    u2 = sess2.merge(u, load=False)
    assert not sess2.dirty

    # modifying the merged child must not turn it into an orphan
    a2 = u2.addresses[0]
    a2.email_address = 'somenewaddress'
    assert not sa.orm.object_mapper(a2)._is_orphan(
        sa.orm.attributes.instance_state(a2))
    sess2.flush()
    sess2.expunge_all()
    eq_(sess2.query(User).get(u2.id).addresses[0].email_address,
        'somenewaddress')

    # this use case is not supported; this is with a pending Address on
    # the pre-merged object, and we currently dont support 'dirty' objects
    # being merged with load=False. in this case, the empty
    # '_state.parents' collection would be an issue, since the optimistic
    # flag is False in _is_orphan() for pending instances. so if we start
    # supporting 'dirty' with load=False, this test will need to pass
    sess = create_session()
    u = sess.query(User).get(7)
    u.addresses.append(Address())
    sess2 = create_session()
    try:
        u2 = sess2.merge(u, load=False)
        assert False

        # if load=False is changed to support dirty objects, this code
        # needs to pass
        a2 = u2.addresses[0]
        a2.email_address = 'somenewaddress'
        assert not sa.orm.object_mapper(a2)._is_orphan(
            sa.orm.attributes.instance_state(a2))
        sess2.flush()
        sess2.expunge_all()
        eq_(sess2.query(User).get(u2.id).addresses[0].email_address,
            'somenewaddress')
    except sa.exc.InvalidRequestError, e:
        assert "load=False option does not support" in str(e)
@testing.resolve_artifact_names
def test_synonym_comparable(self):
    # merge() succeeds on a mapping that includes synonym() and
    # comparable_property() attributes (neither should be treated as
    # plain column state during the merge)
    class User(object):

        class Comparator(PropComparator):
            pass

        def _getValue(self):
            return self._value

        def _setValue(self, value):
            setattr(self, '_value', value)

        value = property(_getValue, _setValue)

    mapper(User, users, properties={
        'uid': synonym('id'),
        'foobar': comparable_property(User.Comparator, User.value),
    })

    sess = create_session()
    u = User()
    u.name = 'ed'
    sess.add(u)
    sess.flush()
    sess.expunge(u)
    sess.merge(u)
@testing.resolve_artifact_names
def test_cascade_doesnt_blowaway_manytoone(self):
    """a merge test that was fixed by [ticket:1202]"""
    s = create_session(autoflush=True)
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses), backref='user')})

    # both merges must resolve to the same User identity: the second
    # merge must not replace a1's already-merged .user with a new object
    a1 = Address(user=s.merge(User(id=1, name='ed')), email_address='x')
    before_id = id(a1.user)
    a2 = Address(user=s.merge(User(id=1, name='jack')), email_address='x')
    after_id = id(a1.user)
    other_id = id(a2.user)
    eq_(before_id, other_id)
    eq_(after_id, other_id)
    eq_(before_id, after_id)
    eq_(a1.user, a2.user)
@testing.resolve_artifact_names
def test_cascades_dont_autoflush(self):
    # A cascading merge into an autoflush session must not trigger a
    # premature flush: the merged user stays pending (in sess.new) until
    # the explicit flush() below.
    sess = create_session(autoflush=True)
    # NOTE: the mapper() return value was previously bound to an unused
    # local ('m'); the binding is dropped, the side effect is what matters.
    mapper(User, users, properties={
        'addresses': relationship(mapper(Address, addresses), backref='user')})
    user = User(id=8, name='fred', addresses=[Address(email_address='user')])
    merged_user = sess.merge(user)
    assert merged_user in sess.new
    sess.flush()
    assert merged_user not in sess.new
@testing.resolve_artifact_names
def test_cascades_dont_autoflush_2(self):
    # merging a collection containing a new child into an autoflush
    # session must not flush mid-merge; the session's autoflush flag is
    # left intact and commit() persists everything
    mapper(User, users, properties={
        'addresses': relationship(Address,
                                  backref='user',
                                  cascade="all, delete-orphan")
    })
    mapper(Address, addresses)

    u = User(id=7, name='fred', addresses=[
        Address(id=1, email_address='fred1'),
    ])
    sess = create_session(autoflush=True, autocommit=False)
    sess.add(u)
    sess.commit()

    sess.expunge_all()

    u = User(id=7, name='fred', addresses=[
        Address(id=1, email_address='fred1'),
        Address(id=2, email_address='fred2'),
    ])
    sess.merge(u)
    assert sess.autoflush
    sess.commit()
@testing.resolve_artifact_names
def test_dont_expire_pending(self):
    """test that pending instances aren't expired during a merge."""
    mapper(User, users)
    u = User(id=7)
    sess = create_session(autoflush=True, autocommit=False)
    u = sess.merge(u)
    assert not bool(attributes.instance_state(u).expired_attributes)

    # accessing the unset attribute emits no SQL for a pending instance
    def go():
        eq_(u.name, None)
    self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_option_state(self):
    """test that the merged takes on the MapperOption characteristics
    of that which is merged.
    """
    class Option(MapperOption):
        propagate_to_loaders = True

    opt1, opt2 = Option(), Option()

    sess = sessionmaker()()

    umapper = mapper(User, users)

    sess.add_all([
        User(id=1, name='u1'),
        User(id=2, name='u2'),
    ])
    sess.commit()

    # s2_users were loaded with opt2 attached to their state
    sess2 = sessionmaker()()
    s2_users = sess2.query(User).options(opt2).all()

    # test 1. no options are replaced by merge options
    sess = sessionmaker()()
    s1_users = sess.query(User).all()

    for u in s1_users:
        ustate = attributes.instance_state(u)
        eq_(ustate.load_path, ())
        eq_(ustate.load_options, set())

    for u in s2_users:
        sess.merge(u)

    for u in s1_users:
        ustate = attributes.instance_state(u)
        eq_(ustate.load_path, (umapper, ))
        eq_(ustate.load_options, set([opt2]))

    # test 2. present options are replaced by merge options
    sess = sessionmaker()()
    s1_users = sess.query(User).options(opt1).all()
    for u in s1_users:
        ustate = attributes.instance_state(u)
        eq_(ustate.load_path, (umapper, ))
        eq_(ustate.load_options, set([opt1]))

    for u in s2_users:
        sess.merge(u)

    for u in s1_users:
        ustate = attributes.instance_state(u)
        eq_(ustate.load_path, (umapper, ))
        eq_(ustate.load_options, set([opt2]))
class MutableMergeTest(_base.MappedTest):
    """merge() of a PickleType column: the incoming value replaces the
    persisted one, compared by value (operator.eq)."""

    @classmethod
    def define_tables(cls, metadata):
        Table("data", metadata,
              Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
              Column('data', PickleType(comparator=operator.eq))
              )

    @classmethod
    def setup_classes(cls):
        class Data(_base.ComparableEntity):
            pass

    @testing.resolve_artifact_names
    def test_list(self):
        mapper(Data, data)
        sess = sessionmaker()()
        d = Data(data=["this", "is", "a", "list"])

        sess.add(d)
        sess.commit()

        # merging a detached instance with the same pk but a different
        # list value must carry the new value into the session
        d2 = Data(id=d.id, data=["this", "is", "another", "list"])
        d3 = sess.merge(d2)
        eq_(d3.data, ["this", "is", "another", "list"])
class CompositeNullPksTest(_base.MappedTest):
    """merge() of an instance with a partially-NULL composite primary key:
    a lookup SELECT is issued only when the mapper allows partial pks
    (the default); allow_partial_pks=False skips the round trip."""

    @classmethod
    def define_tables(cls, metadata):
        Table("data", metadata,
              Column('pk1', String(10), primary_key=True),
              Column('pk2', String(10), primary_key=True),
              )

    @classmethod
    def setup_classes(cls):
        class Data(_base.ComparableEntity):
            pass

    @testing.resolve_artifact_names
    def test_merge_allow_partial(self):
        mapper(Data, data)
        sess = sessionmaker()()
        d1 = Data(pk1="someval", pk2=None)

        # exactly one SELECT: the partial pk is looked up in the database
        def go():
            return sess.merge(d1)
        self.assert_sql_count(testing.db, go, 1)

    @testing.resolve_artifact_names
    def test_merge_disallow_partial(self):
        mapper(Data, data, allow_partial_pks=False)
        sess = sessionmaker()()
        d1 = Data(pk1="someval", pk2=None)

        # zero SELECTs: a partial pk is treated as "not present"
        def go():
            return sess.merge(d1)
        self.assert_sql_count(testing.db, go, 0)
|
{
"content_hash": "0bd5824bc7a8b0818c64ddaca71f11ec",
"timestamp": "",
"source": "github",
"line_count": 1064,
"max_line_length": 122,
"avg_line_length": 31.771616541353385,
"alnum_prop": 0.5507173495045111,
"repo_name": "dbbhattacharya/kitsune",
"id": "d63d7e086ec0b382cf4f5189944e8d73280f45fb",
"size": "33805",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "vendor/packages/sqlalchemy/test/orm/test_merge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
"""
Records from an :ref:`equipment-database` or a :ref:`connections-database`.
"""
from __future__ import unicode_literals
import json
import datetime
from enum import Enum
from xml.etree.cElementTree import Element
from collections import OrderedDict
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping # Python 2.7
from dateutil.relativedelta import relativedelta
from .utils import (
convert_to_enum,
convert_to_date,
)
from .constants import (
Parity,
StopBits,
DataBits,
Backend,
MSLInterface,
LF,
CR,
)
from .factory import (
connect,
find_interface,
)
class RecordDict(Mapping):

    __slots__ = '_mapping'

    def __init__(self, dictionary):
        """A read-only dictionary that supports attribute access via a key lookup."""
        if not isinstance(dictionary, dict):
            raise TypeError("Can only create a 'RecordDict' from a dict")

        def deep_tuple(a):
            # freeze a (possibly nested) sequence into nested tuples
            return tuple(map(deep_tuple, a)) if isinstance(a, (list, tuple)) else a

        # normalize the values in place: nested dicts become read-only
        # RecordDicts and list/tuple values become (nested) tuples
        for key, value in dictionary.items():
            if isinstance(value, dict):
                dictionary[key] = RecordDict(value)
            if isinstance(value, (list, tuple)):
                dictionary[key] = deep_tuple(value)

        super(RecordDict, self).__setattr__('_mapping', dictionary)

    def __getattr__(self, item):
        # attribute access is just a key lookup (raises KeyError if absent)
        return self._mapping[item]

    def __getitem__(self, item):
        return self._mapping[item]

    def __iter__(self):
        return iter(self._mapping)

    def __len__(self):
        return len(self._mapping)

    def __repr__(self):
        return 'RecordDict<{}>'.format(self._mapping)

    def __setattr__(self, key, value):
        # read only -- raise TypeError with a descriptive message
        self._raise('item assignment')

    def __delattr__(self, item):
        # read only -- raise TypeError with a descriptive message
        self._raise('item deletion')

    def _raise(self, message):
        raise TypeError('A {!r} object does not support {}'.format(self.__class__.__name__, message))

    # every mutating dict method is disabled
    def clear(self):
        self._raise('clearing')

    def fromkeys(self, *args, **kwargs):
        self._raise('fromkeys')

    def pop(self, *args, **kwargs):
        self._raise('popping')

    def popitem(self):
        self._raise('popitem')

    def setdefault(self, *args, **kwargs):
        self._raise('setdefault')

    def update(self, *args, **kwargs):
        self._raise('updating')

    def copy(self):
        """:class:`RecordDict`: Return a copy of the :class:`RecordDict`."""
        return RecordDict(self._mapping.copy())

    def to_xml(self, tag='RecordDict'):
        """Convert the :class:`RecordDict` to an XML :class:`~xml.etree.ElementTree.Element`

        Parameters
        ----------
        tag : :class:`str`
            The name of the :class:`~xml.etree.ElementTree.Element`.

        Returns
        -------
        :class:`~xml.etree.ElementTree.Element`
            The :class:`RecordDict` as an XML :class:`~xml.etree.ElementTree.Element`.
        """
        root = Element(tag)
        for key, value in self._mapping.items():
            if isinstance(value, RecordDict):
                child = value.to_xml(tag=key)
            else:
                child = Element(key)
                child.text = repr(value)
            root.append(child)
        return root

    def to_json(self):
        """:class:`dict`: Convert the :class:`RecordDict` to be JSON_ serializable.

        .. _JSON: https://www.json.org/
        """
        serializable = dict()
        for key, value in self._mapping.items():
            if isinstance(value, RecordDict):
                serializable[key] = value.to_json()
            elif isinstance(value, Enum):
                serializable[key] = value.name
            else:
                try:
                    json.dumps(value)
                except TypeError:
                    serializable[key] = str(value)  # cannot be serialized
                else:
                    serializable[key] = value  # can be serialized
        return serializable
class Record(object):

    def to_dict(self):
        """:class:`dict`: Convert the Record to a :class:`dict`."""
        return {name: getattr(self, name) for name in self.__slots__}

    def to_json(self):
        """:class:`dict`: Convert the Record to be JSON_ serializable.

        This differs from :meth:`to_dict` such that all values that are not
        JSON_ serializable, like :class:`datetime.date` objects, are
        converted to a :class:`str`.

        .. _JSON: https://www.json.org/
        """
        raise NotImplementedError

    def to_xml(self):
        """:class:`~xml.etree.ElementTree.Element`: Convert the Record to an XML
        :class:`~xml.etree.ElementTree.Element`."""
        raise NotImplementedError

    @staticmethod
    def _dict_to_str(dict_):
        # render key-value pairs one per line (sorted by key), or 'None'
        if not dict_:
            return 'None'
        pairs = (' {}: {!r}'.format(k, v) for k, v in sorted(dict_.items()))
        return '\n' + '\n'.join(pairs)

    @staticmethod
    def _list_to_str(list_):
        # render each item's (possibly multi-line) repr, indented, or 'None'
        if not list_:
            return 'None'
        rendered = []
        for item in list_:
            for line in repr(item).splitlines():
                rendered.append(' {}'.format(line))
        return '\n' + '\n'.join(rendered)
class EquipmentRecord(Record):

    __slots__ = ('alias', 'calibrations', 'category', 'connection', 'description',
                 'is_operable', 'maintenances', 'manufacturer', 'model',
                 'serial', 'team', 'unique_key', 'user_defined')

    def __init__(self, alias='', calibrations=None, category='', connection=None,
                 description='', is_operable=False, maintenances=None,
                 manufacturer='', model='', serial='', team='', unique_key='', **user_defined):
        """Contains the information about an equipment record in an :ref:`equipment-database`.

        Parameters
        ----------
        alias : :class:`str`
            An alias to use to reference this equipment by.
        calibrations : :class:`list` of :class:`.CalibrationRecord`
            The calibration history of the equipment.
        category : :class:`str`
            The category (e.g., Laser, DMM) that the equipment belongs to.
        connection : :class:`.ConnectionRecord`
            The information necessary to communicate with the equipment.
        description : :class:`str`
            A description about the equipment.
        is_operable : :class:`bool`
            Whether the equipment is able to be used.
        maintenances : :class:`list` of :class:`.MaintenanceRecord`
            The maintenance history of the equipment.
        manufacturer : :class:`str`
            The name of the manufacturer of the equipment.
        model : :class:`str`
            The model number of the equipment.
        serial : :class:`str`
            The serial number (or unique identifier) of the equipment.
        team : :class:`str`
            The team (e.g., Light Standards) that the equipment belongs to.
        unique_key : :class:`str`
            The key that uniquely identifies the equipment record in a database.
        **user_defined
            All additional key-value pairs are added to the :attr:`.user_defined` attribute.
        """
        self.alias = alias  # the alias should be of type str, but this is up to the user
        """:class:`str`: An alias to use to reference this equipment by.

        The `alias` can be defined in 4 ways:

        * by specifying it when the EquipmentRecord is created
        * by setting the value after the EquipmentRecord has been created
        * in the **<equipment>** XML tag in a :ref:`configuration-file`
        * in the **Properties** field in a :ref:`connections-database`
        """

        self.calibrations = self._set_calibrations(calibrations)
        """:class:`tuple` of :class:`.CalibrationRecord`: The calibration history of the equipment."""

        self.category = '{}'.format(category)
        """:class:`str`: The category (e.g., Laser, DMM) that the equipment belongs to."""

        self.description = '{}'.format(description)
        """:class:`str`: A description about the equipment."""

        self.is_operable = bool(is_operable)
        """:class:`bool`: Whether the equipment is able to be used."""

        self.maintenances = self._set_maintenances(maintenances)
        """:class:`tuple` of :class:`.MaintenanceRecord`: The maintenance history of the equipment."""

        self.manufacturer = '{}'.format(manufacturer)
        """:class:`str`: The name of the manufacturer of the equipment."""

        self.model = '{}'.format(model)
        """:class:`str`: The model number of the equipment."""

        self.serial = '{}'.format(serial)
        """:class:`str`: The serial number (or unique identifier) of the equipment."""

        # requires self.manufacturer, self.model and self.serial to be already defined
        self.connection = self._set_connection(connection)
        """:class:`.ConnectionRecord`: The information necessary to communicate with the equipment."""

        # cache this value because __str__ is called a lot during logging
        self._str = 'EquipmentRecord<{}|{}|{}>'.format(self.manufacturer, self.model, self.serial)

        self.team = '{}'.format(team)
        """:class:`str`: The team (e.g., Light Standards) that the equipment belongs to."""

        self.unique_key = '{}'.format(unique_key)
        """:class:`str`: The key that uniquely identifies the equipment record in a database."""

        try:
            # a 'user_defined' kwarg was explicitly defined
            ud = user_defined.pop('user_defined')
        except KeyError:
            ud = user_defined
        else:
            ud.update(**user_defined)  # the user_defined dict might still contain other key-value pairs

        # NOTE: assigning user_defined LAST matters -- __setattr__ below uses
        # its existence as the "record is now frozen" sentinel
        self.user_defined = RecordDict(ud)
        """:class:`.RecordDict`: User-defined, key-value pairs."""

    def __repr__(self):
        # multi-line, human-readable dump of every field
        calibrations = self._list_to_str(self.calibrations)
        maintenances = self._list_to_str(self.maintenances)
        user_defined = self._dict_to_str(self.user_defined)
        if self.connection:
            connection = '\n ' + '\n '.join(repr(self.connection).splitlines())
        else:
            connection = 'None'
        return 'EquipmentRecord\n' \
               ' alias: {!r}\n' \
               ' calibrations: {}\n' \
               ' category: {!r}\n' \
               ' connection: {}\n' \
               ' description: {!r}\n' \
               ' is_operable: {}\n' \
               ' maintenances: {}\n' \
               ' manufacturer: {!r}\n' \
               ' model: {!r}\n' \
               ' serial: {!r}\n' \
               ' team: {!r}\n' \
               ' unique_key: {!r}\n' \
               ' user_defined: {}'.format(self.alias, calibrations, self.category, connection,
                                          self.description, self.is_operable, maintenances,
                                          self.manufacturer, self.model, self.serial,
                                          self.team, self.unique_key, user_defined)

    def __str__(self):
        # value is pre-computed in __init__ (logging calls this frequently)
        return self._str

    def __setattr__(self, name, value):
        try:
            # once the `user_defined` attribute is created the class becomes read only
            # (except for the `alias` attribute which can be changed at any time)
            self.user_defined
        except AttributeError:
            super(EquipmentRecord, self).__setattr__(name, value)
        else:
            if name == 'alias':  # only allow the alias to be modified
                super(EquipmentRecord, self).__setattr__(name, value)
            else:
                raise TypeError("An 'EquipmentRecord' cannot be modified. "
                                "Cannot set {!r} to {!r}".format(name, value))

    def connect(self, demo=None):
        """Establish a connection to the equipment.

        Calls the :func:`~msl.equipment.factory.connect` function.

        Parameters
        ----------
        demo : :class:`bool`, optional
            Whether to simulate a connection to the equipment by opening
            a connection in demo mode. This allows you to test your code
            if the equipment is not physically connected to a computer.
            If :data:`None` then the `demo` value is determined from the
            :attr:`~.config.Config.DEMO_MODE` attribute.

        Returns
        -------
        A :class:`~msl.equipment.connection.Connection` subclass.
        """
        return connect(self, demo=demo)

    def is_calibration_due(self, months=0):
        """Whether the equipment needs to be re-calibrated.

        Parameters
        ----------
        months : :class:`int`, optional
            The number of months to add to today's date to determine if
            the equipment needs to be re-calibrated within a certain amount
            of time. For example, if ``months = 6`` then that is a way of
            asking *"is a re-calibration due within the next 6 months?"*.

        Returns
        -------
        :class:`bool`
            :data:`True` if the equipment needs to be re-calibrated, :data:`False`
            if it does not need to be re-calibrated (or it has never been calibrated).
        """
        next_date = self.next_calibration_date()
        if next_date is None:
            return False
        # negative `months` values are clamped to 0 (i.e. "due today?")
        ask_date = datetime.date.today() + relativedelta(months=max(0, int(months)))
        return ask_date > next_date

    @property
    def latest_calibration(self):
        """:class:`.CalibrationRecord`: The latest calibration or :data:`None`
        if the equipment has never been calibrated."""
        latest = None
        date = datetime.date(datetime.MINYEAR, 1, 1)
        for report in self.calibrations:
            # the calibration date gets precedence over the report date
            if report.calibration_date > date:
                date = report.calibration_date
                latest = report
            elif report.report_date > date:
                date = report.report_date
                latest = report
        return latest

    def next_calibration_date(self):
        """The date that the next calibration is due.

        Returns
        -------
        :class:`datetime.date`
            The next calibration date (or :data:`None` if the equipment has
            never been calibrated or if it is no longer in operation).
        """
        if not self.is_operable:
            return None
        report = self.latest_calibration
        if report is None or report.calibration_cycle <= 0:
            return None
        # the calibration date gets precedence over the report date
        # (a year of MINYEAR is treated as "date was never specified")
        if report.calibration_date.year != datetime.MINYEAR:
            date = report.calibration_date
        elif report.report_date.year != datetime.MINYEAR:
            date = report.report_date
        else:
            return None
        # calibration_cycle may be fractional, e.g. 1.5 years -> 1 year 6 months
        years = int(report.calibration_cycle)
        months = int(round(12 * (report.calibration_cycle - years)))
        return date + relativedelta(years=years, months=months)

    def to_dict(self):
        """Convert this :class:`EquipmentRecord` to a :class:`dict`.

        Returns
        -------
        :class:`dict`
            The :class:`EquipmentRecord` as a :class:`dict`.
        """
        return {
            'alias': self.alias,
            'calibrations': tuple(cr.to_dict() for cr in self.calibrations),
            'category': self.category,
            'connection': None if self.connection is None else self.connection.to_dict(),
            'description': self.description,
            'is_operable': self.is_operable,
            'maintenances': tuple(mh.to_dict() for mh in self.maintenances),
            'manufacturer': self.manufacturer,
            'model': self.model,
            'serial': self.serial,
            'team': self.team,
            'unique_key': self.unique_key,
            'user_defined': self.user_defined,
        }

    def to_json(self):
        """Convert this :class:`EquipmentRecord` to be JSON_ serializable.

        .. _JSON: https://www.json.org/

        Returns
        -------
        :class:`dict`
            The :class:`EquipmentRecord` as a JSON_\\-serializable object.
        """
        return {
            'alias': self.alias,
            'calibrations': tuple(cr.to_json() for cr in self.calibrations),
            'category': self.category,
            'connection': None if self.connection is None else self.connection.to_json(),
            'description': self.description,
            'is_operable': self.is_operable,
            'maintenances': tuple(mh.to_json() for mh in self.maintenances),
            'manufacturer': self.manufacturer,
            'model': self.model,
            'serial': self.serial,
            'team': self.team,
            'unique_key': self.unique_key,
            'user_defined': self.user_defined.to_json(),
        }

    def to_xml(self):
        """Convert this :class:`EquipmentRecord` to an XML :class:`~xml.etree.ElementTree.Element`.

        Returns
        -------
        :class:`~xml.etree.ElementTree.Element`
            The :class:`EquipmentRecord` as an XML element.
        """
        root = Element('EquipmentRecord')
        for name in EquipmentRecord.__slots__:
            element = Element(name)
            if name == 'connection':
                if self.connection is not None:
                    element.append(self.connection.to_xml())
            elif name == 'maintenances':
                for mh in self.maintenances:
                    element.append(mh.to_xml())
            elif name == 'calibrations':
                for cr in self.calibrations:
                    element.append(cr.to_xml())
            elif name == 'user_defined':
                # sorted so that the XML output is deterministic
                for key, value in sorted(self.user_defined.items()):
                    prop = Element(key)
                    prop.text = '{}'.format(value)
                    element.append(prop)
            else:
                element.text = '{}'.format(getattr(self, name))
            root.append(element)
        return root

    def _set_connection(self, record):
        # normalize `record` into a ConnectionRecord (or None) and make sure
        # it identifies the same piece of equipment as this record
        if not record:
            return None

        if not isinstance(record, ConnectionRecord):
            if isinstance(record, dict):
                record = ConnectionRecord(**record)
            else:
                raise TypeError('Must pass in a ConnectionRecord object. Got {!r}'.format(record))

        # ensure that the manufacturer, model and serial match
        for item in ('manufacturer', 'model', 'serial'):
            r, s = getattr(record, item), getattr(self, item)
            if not r:  # then it was not set in the ConnectionRecord
                setattr(record, item, s)
            elif r != s:
                raise ValueError('ConnectionRecord.{0} ({1}) != EquipmentRecord.{0} ({2})'.format(item, r, s))

        return record

    @staticmethod
    def _set_calibrations(calibrations):
        # normalize a list of CalibrationRecords/dicts into a tuple of
        # CalibrationRecords (dicts have their 'measurands' expanded first)
        if calibrations is None:
            return tuple()
        reports = []
        for report in calibrations:
            if isinstance(report, CalibrationRecord):
                reports.append(report)
            elif isinstance(report, dict):
                report['measurands'] = [MeasurandRecord(**m) for m in report['measurands']]
                reports.append(CalibrationRecord(**report))
            else:
                raise TypeError("Invalid data type {!r} for creating a 'CalibrationRecord'".format(type(report)))
        return tuple(reports)

    @staticmethod
    def _set_maintenances(maintenances):
        # normalize a list of MaintenanceRecords/dicts into a tuple of
        # MaintenanceRecords
        if maintenances is None:
            return tuple()
        history = []
        for maintenance in maintenances:
            if isinstance(maintenance, MaintenanceRecord):
                history.append(maintenance)
            elif isinstance(maintenance, dict):
                history.append(MaintenanceRecord(**maintenance))
            else:
                raise TypeError("Invalid data type {!r} for creating a 'MaintenanceRecord'".format(type(maintenance)))
        return tuple(history)
class ConnectionRecord(Record):
__slots__ = ('address', 'backend', 'interface', 'manufacturer', 'model', 'properties', 'serial')
_LF = ['\\n', "'\\n'", '"\\n"', "b'\\n'", b'\n', b'\\n', b"b'\\n'"]
_CR = ['\\r', "'\\r'", '"\\r"', "b'\\r'", b'\r', b'\\r', b"b'\\r'"]
_CRLF = ['\\r\\n', "'\\r\\n'", '"\\r\\n"', "b'\\r\\n'", b'\r\n', b'\\r\\n', b"b'\r\n'", b"b'\\r\\n'"]
def __init__(self, address='', backend=Backend.MSL, interface=None, manufacturer='',
             model='', serial='', **properties):
    """Contains the information about a connection record in a :ref:`connections-database`.

    Parameters
    ----------
    address : :class:`str`
        The address to use for the connection (see :ref:`address-syntax` for examples).
    backend : :class:`str`, :class:`int`, or :class:`.Backend`
        The backend to use to communicate with the equipment. The value must be able to
        be converted to a :class:`.Backend` enum.
    interface : :class:`str`, :class:`int`, or :class:`.MSLInterface`
        The interface to use to communicate with the equipment. If :data:`None` then
        determines the `interface` based on the value of `address`. If specified then
        the value must be able to be converted to a :class:`.MSLInterface` enum.
    manufacturer : :class:`str`
        The name of the manufacturer of the equipment.
    model : :class:`str`
        The model number of the equipment.
    serial : :class:`str`
        The serial number (or unique identifier) of the equipment.
    properties
        Additional key-value pairs that are required to communicate with the equipment.
    """
    self.address = '{}'.format(address)
    """:class:`str`: The address to use for the connection (see :ref:`address-syntax` for examples)."""

    self.backend = convert_to_enum(backend, Backend)
    """:class:`.Backend`: The backend to use to communicate with the equipment."""

    # the interface is either given explicitly, or (for the MSL backend)
    # deduced from the address; otherwise there is no interface
    if interface:
        self.interface = convert_to_enum(interface, MSLInterface, to_upper=True)
    elif not address or self.backend != Backend.MSL:
        self.interface = MSLInterface.NONE
    else:
        self.interface = find_interface(address)
    """:class:`.MSLInterface`: The interface that is used for the communication system that
    transfers data between a computer and the equipment (only used if the :attr:`.backend`
    is equal to :attr:`~.Backend.MSL`)."""

    self.manufacturer = '{}'.format(manufacturer)
    """:class:`str`: The name of the manufacturer of the equipment."""

    self.model = '{}'.format(model)
    """:class:`str`: The model number of the equipment."""

    self.properties = self._set_properties(properties)
    """:class:`dict`: Additional key-value pairs that are required to communicate with the equipment.

    For example, communicating via RS-232 may require::

        {'baud_rate': 19200, 'parity': 'even'}

    See the :ref:`connections-database` for examples on how to set the `properties`.
    """

    self.serial = '{}'.format(serial)
    """:class:`str`: The serial number (or unique identifier) of the equipment."""
def __repr__(self):
    # multi-line dump; properties are sorted so output is deterministic
    props = self._dict_to_str(dict((k, self.properties[k]) for k in sorted(self.properties)))
    return 'ConnectionRecord\n' \
           ' address: {!r}\n' \
           ' backend: {!r}\n' \
           ' interface: {!r}\n' \
           ' manufacturer: {!r}\n' \
           ' model: {!r}\n' \
           ' properties: {}\n' \
           ' serial: {!r}'.format(self.address, self.backend, self.interface,
                                  self.manufacturer, self.model, props, self.serial)
def __str__(self):
    """Return a compact one-line identifier: ``ConnectionRecord<manufacturer|model|serial>``."""
    template = 'ConnectionRecord<{}|{}|{}>'
    return template.format(self.manufacturer, self.model, self.serial)
def to_json(self):
    """Convert this :class:`ConnectionRecord` to be JSON_ serializable.

    .. _JSON: https://www.json.org/

    Returns
    -------
    :class:`dict`
        The :class:`ConnectionRecord` as a JSON_\\-serializable object.
    """
    def _serializable(value):
        # Enum members are stored by their name; any value that json cannot
        # encode falls back to its repr() so that serialization never fails.
        if isinstance(value, Enum):
            return value.name
        try:
            json.dumps(value)
        except TypeError:
            return repr(value)  # cannot be serialized
        return value  # can be serialized

    props = {key: _serializable(value) for key, value in self.properties.items()}
    return {
        'address': self.address,
        'backend': self.backend.name,
        'interface': self.interface.name,
        'manufacturer': self.manufacturer,
        'model': self.model,
        'properties': props,
        'serial': self.serial,
    }
def to_xml(self):
    """Convert this :class:`ConnectionRecord` to an XML :class:`~xml.etree.ElementTree.Element`.

    Returns
    -------
    :class:`~xml.etree.ElementTree.Element`
        The :class:`ConnectionRecord` as a XML :class:`~xml.etree.ElementTree.Element`.
    """
    def _property_element(key):
        # Build one <key>value</key> child for a single properties entry.
        value = self.properties[key]
        child = Element(key)
        if isinstance(value, Enum):
            child.text = value.name
        elif key.endswith('termination') or isinstance(value, bytes):
            # terminations and raw bytes are written as their repr() so the
            # exact byte content survives a text round trip
            child.text = repr(value)
        else:
            child.text = '{}'.format(value)
        return child

    root = Element('ConnectionRecord')
    # NOTE(review): `to_dict` is defined outside this block (presumably on the
    # Record base class) -- confirm it mirrors the to_json key order.
    for name, value in self.to_dict().items():
        element = Element(name)
        if name == 'properties':
            for key in sorted(self.properties):
                element.append(_property_element(key))
        elif isinstance(value, Enum):
            element.text = value.name
        else:
            element.text = '{}'.format(value)
        root.append(element)
    return root
def _set_properties(self, kwargs):
    """Normalize the keyword arguments into the `properties` dict.

    Accepts either an explicit ``properties=dict(...)`` kwarg (remaining
    kwargs are merged into it) or treats all kwargs as the properties.
    Serial-related values are converted to their enum equivalents and
    ``*termination`` values are normalized to bytes.

    Must be called after ``self.address``, ``self.backend`` and
    ``self.interface`` have been assigned, since it reads all three.
    """
    try:
        # a 'properties' kwarg was explicitly defined
        properties = kwargs.pop('properties')
    except KeyError:
        # no explicit 'properties' -> every kwarg is a property
        properties = kwargs
    else:
        if not properties:
            # allow properties=None / properties={} to mean "no properties"
            properties = {}
        elif not isinstance(properties, dict):
            raise TypeError('The properties kwarg for a ConnectionRecord must be of type dict. '
                            'Got {!r} -> {!r}'.format(type(properties), properties))
        # any remaining kwargs are merged in (they override nothing that was
        # popped, only keys duplicated in both dicts)
        properties.update(kwargs)

    if self.address.startswith('UDP'):
        # a UDP address implies a datagram socket
        properties['socket_type'] = 'SOCK_DGRAM'

    # Determine whether the connection is serial: either the MSL interface is
    # SERIAL, or a PyVISA backend with a serial-style address alias.
    is_serial = self.interface == MSLInterface.SERIAL
    if not is_serial and self.backend == Backend.PyVISA:
        for alias in ('COM', 'ASRL', 'ASRLCOM'):
            if self.address.startswith(alias):
                is_serial = True
                break

    # Only values are replaced below (no keys added/removed), so mutating
    # `properties` while iterating items() is safe.
    for key, value in properties.items():
        if is_serial:
            if key == 'parity':
                properties[key] = convert_to_enum(value, Parity, to_upper=True)
            elif key == 'stop_bits' or key == 'stopbits':
                properties[key] = convert_to_enum(value, StopBits, to_upper=True)
            elif key == 'data_bits' or key == 'bytesize':
                properties[key] = convert_to_enum(value, DataBits, to_upper=True)

        if key.endswith('termination'):
            if value in ConnectionRecord._CRLF:  # must check before the LF and CR checks
                properties[key] = CR + LF
            elif value in ConnectionRecord._LF:
                properties[key] = LF
            elif value in ConnectionRecord._CR:
                properties[key] = CR
            elif not isinstance(value, bytes) and value is not None:
                # assumes any other termination value is a str -- TODO confirm
                properties[key] = value.encode()
    return properties
class MaintenanceRecord(Record):

    __slots__ = ('comment', 'date')

    def __init__(self, comment='', date=None):
        """A maintenance record in an :ref:`equipment-database`.

        Parameters
        ----------
        comment : :class:`str`
            A description of the maintenance that was performed.
        date : :class:`datetime.date`, :class:`datetime.datetime` or :class:`str`
            An object that can be converted to a :class:`datetime.date` object.
            If a :class:`str` then in the format ``'YYYY-MM-DD'``.
        """
        # `date` is deliberately assigned last: __setattr__ freezes the
        # record as soon as the `date` attribute exists.
        self.comment = '{}'.format(comment)
        """:class:`str`: A description of the maintenance that was performed."""

        self.date = convert_to_date(date)
        """:class:`datetime.date`: The date that the maintenance was performed."""

    def __setattr__(self, name, value):
        try:
            self.date  # once the `date` is defined the class becomes read only
        except AttributeError:
            super(MaintenanceRecord, self).__setattr__(name, value)
        else:
            raise TypeError("A 'MaintenanceRecord' cannot be modified. Cannot set {!r} to {!r}".format(name, value))

    def __repr__(self):
        """Return a multi-line, developer-oriented representation."""
        template = 'MaintenanceRecord\n' \
                   ' comment: {!r}\n' \
                   ' date: {}'
        return template.format(self.comment, self.date)

    def __str__(self):
        """Return a compact one-line identifier based on the date."""
        return 'MaintenanceRecord<{}>'.format(self.date)

    def to_json(self):
        """Convert this :class:`MaintenanceRecord` to be JSON_ serializable.

        .. _JSON: https://www.json.org/

        Returns
        -------
        :class:`dict`
            The :class:`MaintenanceRecord` as a JSON_\\-serializable object.
        """
        return {
            'comment': self.comment,
            'date': self.date.isoformat(),
        }

    def to_xml(self):
        """Convert this :class:`MaintenanceRecord` to an XML :class:`~xml.etree.ElementTree.Element`.

        Returns
        -------
        :class:`~xml.etree.ElementTree.Element`
            The :class:`MaintenanceRecord` as a XML :class:`~xml.etree.ElementTree.Element`.
        """
        root = Element('MaintenanceRecord')
        for tag, text, attrib in (
                ('comment', self.comment, {}),
                ('date', self.date.isoformat(), {'format': 'YYYY-MM-DD'})):
            child = Element(tag)
            child.text = text
            child.attrib.update(attrib)
            root.append(child)
        return root
class MeasurandRecord(Record):

    __slots__ = ('calibration', 'conditions', 'type', 'unit')

    def __init__(self, calibration=None, conditions=None, type='', unit=''):
        """Information about a single measurement for a calibration.

        Parameters
        ----------
        calibration : :class:`dict`
            The information about the calibration.
        conditions : :class:`dict`
            The information about the conditions under which the measurement
            was performed.
        type : :class:`str`
            The type of measurement (e.g., voltage, temperature, transmittance, ...).
        unit : :class:`str`
            The unit that is associated with the measurement (e.g., V, deg C, %, ...).
        """
        if calibration is None:
            calibration = {}
        elif not isinstance(calibration, dict):
            raise TypeError("the 'calibration' parameter must be a dict")

        if conditions is None:
            conditions = {}
        elif not isinstance(conditions, dict):
            raise TypeError("the 'conditions' parameter must be a dict")

        # `unit` is deliberately assigned last: __setattr__ freezes the
        # record as soon as the `unit` attribute exists.
        self.calibration = RecordDict(calibration)
        """:class:`.RecordDict`: The information about calibration."""

        self.conditions = RecordDict(conditions)
        """:class:`.RecordDict`: The information about the measurement conditions."""

        self.type = '{}'.format(type)
        """:class:`str`: The type of measurement (e.g., voltage, temperature, transmittance, ...)."""

        self.unit = '{}'.format(unit)
        """:class:`str`: The unit that is associated with the measurement (e.g., V, deg C, %, ...)."""

    def __setattr__(self, name, value):
        try:
            self.unit  # once the `unit` is defined the class becomes read only
        except AttributeError:
            super(MeasurandRecord, self).__setattr__(name, value)
        else:
            raise TypeError("A 'MeasurandRecord' cannot be modified. Cannot set {!r} to {!r}".format(name, value))

    def __repr__(self):
        """Return a multi-line, developer-oriented representation."""
        template = 'MeasurandRecord\n' \
                   ' calibration: {}\n' \
                   ' conditions: {}\n' \
                   ' type: {!r}\n' \
                   ' unit: {!r}'
        return template.format(self._dict_to_str(self.calibration),
                               self._dict_to_str(self.conditions),
                               self.type, self.unit)

    def __str__(self):
        """Return a compact one-line identifier based on the measurement type."""
        return 'MeasurandRecord<{}>'.format(self.type)

    def to_json(self):
        """Convert this :class:`MeasurandRecord` to be JSON_ serializable.

        .. _JSON: https://www.json.org/

        Returns
        -------
        :class:`dict`
            The :class:`MeasurandRecord` as a JSON_\\-serializable object.
        """
        return {
            'calibration': self.calibration.to_json(),
            'conditions': self.conditions.to_json(),
            'type': self.type,
            'unit': self.unit,
        }

    def to_xml(self):
        """Convert this :class:`MeasurandRecord` to an XML :class:`~xml.etree.ElementTree.Element`.

        Returns
        -------
        :class:`~xml.etree.ElementTree.Element`
            The :class:`MeasurandRecord` as a XML :class:`~xml.etree.ElementTree.Element`.
        """
        root = Element('MeasurandRecord')
        root.append(self.calibration.to_xml(tag='calibration'))
        root.append(self.conditions.to_xml(tag='conditions'))
        for attr in ('type', 'unit'):
            child = Element(attr)
            child.text = getattr(self, attr)
            root.append(child)
        return root
class CalibrationRecord(Record):

    __slots__ = ('calibration_cycle', 'calibration_date', 'measurands', 'report_date', 'report_number')

    def __init__(self, calibration_cycle=0, calibration_date=None, measurands=None,
                 report_date=None, report_number=''):
        """A calibration record in an :ref:`equipment-database`.

        Parameters
        ----------
        calibration_cycle : :class:`int` or :class:`float`
            The number of years that can pass before the equipment must be re-calibrated.
        calibration_date : :class:`datetime.date`, :class:`datetime.datetime` or :class:`str`
            The date that the calibration was performed. If a :class:`str` then
            in the format ``'YYYY-MM-DD'``.
        measurands : :class:`list` of :class:`.MeasurandRecord`
            The quantities that were measured. Items may also be non-empty
            :class:`dict`\\s of keyword arguments for :class:`.MeasurandRecord`.
        report_date : :class:`datetime.date`, :class:`datetime.datetime` or :class:`str`
            The date that the report was issued. If a :class:`str` then in the
            format ``'YYYY-MM-DD'``.
        report_number : :class:`str`
            The report number.
        """
        if measurands is None:
            measurands = []
        measures = []
        for item in measurands:
            if isinstance(item, MeasurandRecord):
                measures.append(item)
            elif item and isinstance(item, dict):
                # a non-empty dict is treated as MeasurandRecord kwargs
                measures.append(MeasurandRecord(**item))

        # `report_number` is deliberately assigned last: __setattr__ freezes
        # the record as soon as that attribute exists.
        self.calibration_cycle = float(calibration_cycle)
        """:class:`float`: The number of years that can pass before the equipment must be re-calibrated."""

        self.calibration_date = convert_to_date(calibration_date)
        """:class:`datetime.date`: The date that the calibration was performed."""

        self.measurands = RecordDict(OrderedDict((m.type, m) for m in measures))
        """:class:`.RecordDict`: The quantities that were measured, keyed by measurement type."""

        self.report_date = convert_to_date(report_date)
        """:class:`datetime.date`: The date that the report was issued."""

        self.report_number = '{}'.format(report_number)
        """:class:`str`: The report number."""

    def __setattr__(self, name, value):
        try:
            self.report_number  # once the `report_number` is defined the class becomes read only
        except AttributeError:
            super(CalibrationRecord, self).__setattr__(name, value)
        else:
            raise TypeError("A 'CalibrationRecord' cannot be modified. Cannot set {!r} to {!r}".format(name, value))

    def __repr__(self):
        """Return a multi-line, developer-oriented representation."""
        if self.measurands:
            lines = []
            for record in self.measurands.values():
                for line in repr(record).splitlines():
                    lines.append(' {}'.format(line))
            measurands = '\n' + '\n'.join(lines)
        else:
            measurands = 'None'
        template = 'CalibrationRecord\n' \
                   ' calibration_cycle: {}\n' \
                   ' calibration_date: {}\n' \
                   ' measurands: {}\n' \
                   ' report_date: {}\n' \
                   ' report_number: {!r}'
        return template.format(self.calibration_cycle, self.calibration_date,
                               measurands, self.report_date, self.report_number)

    def __str__(self):
        """Return a compact one-line identifier based on the report number."""
        return 'CalibrationRecord<{}>'.format(self.report_number)

    def to_json(self):
        """Convert this :class:`CalibrationRecord` to be JSON_ serializable.

        .. _JSON: https://www.json.org/

        Returns
        -------
        :class:`dict`
            The :class:`CalibrationRecord` as a JSON_\\-serializable object.
        """
        return {
            'calibration_cycle': self.calibration_cycle,
            'calibration_date': self.calibration_date.isoformat(),
            'measurands': tuple(m.to_json() for m in self.measurands.values()),
            'report_date': self.report_date.isoformat(),
            'report_number': self.report_number
        }

    def to_xml(self):
        """Convert this :class:`CalibrationRecord` to an XML :class:`~xml.etree.ElementTree.Element`.

        Returns
        -------
        :class:`~xml.etree.ElementTree.Element`
            The :class:`CalibrationRecord` as a XML :class:`~xml.etree.ElementTree.Element`.
        """
        root = Element('CalibrationRecord')

        def _append(tag, text, **attrib):
            # Append one <tag attrib...>text</tag> child to the root.
            child = Element(tag)
            child.text = text
            child.attrib.update(attrib)
            root.append(child)

        _append('calibration_date', self.calibration_date.isoformat(), format='YYYY-MM-DD')
        _append('calibration_cycle', str(self.calibration_cycle), unit='years')

        measurands = Element('measurands')
        for measurand in self.measurands.values():
            measurands.append(measurand.to_xml())
        root.append(measurands)

        _append('report_number', self.report_number)
        _append('report_date', self.report_date.isoformat(), format='YYYY-MM-DD')
        return root
|
{
"content_hash": "2c78c3eb29d86ca17021c4922fd9636d",
"timestamp": "",
"source": "github",
"line_count": 1042,
"max_line_length": 118,
"avg_line_length": 38.27831094049904,
"alnum_prop": 0.560873489444918,
"repo_name": "MSLNZ/msl-equipment",
"id": "dbfc63b12933a0f9fdb0ddda890e3322aef147b0",
"size": "39886",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "msl/equipment/record_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2490464"
}
],
"symlink_target": ""
}
|
from logging import getLogger
from dateutil.parser import parse
from django.utils.translation import ugettext_noop as _
from django.utils.six import iteritems
from api.api_views import APIView
from api.views import exception_handler
from api.utils.views import call_api_view
from api.utils.request import set_request_method
from api.exceptions import (ObjectAlreadyExists, PreconditionRequired, NodeIsNotOperational, ExpectationFailed,
FailedDependency)
from api.task.utils import get_task_error_message
from api.task.response import TaskResponse, SuccessTaskResponse, FailureTaskResponse
from api.node.messages import LOG_IMG_IMPORT, LOG_IMG_DELETE
from api.node.image.serializers import NodeImageSerializer, ExtendedNodeImageSerializer
from vms.models import Image
from que import TG_DC_UNBOUND
from que.tasks import execute
# An image task can wait for a free image worker for up to 1 hour
IMAGE_TASK_EXPIRES = 3600  # seconds; used as the `expires` argument of que.tasks.execute()

logger = getLogger(__name__)  # module-level logger for this API view module
class NodeImageView(APIView):
    """API view for managing disk images on a compute node's storage pool.

    Supports showing image details (GET), importing an image to a node
    storage (POST), removing it (DELETE) and bulk-removing unused images
    (cleanup). Import/delete run asynchronously via the task queue.
    """
    dc_bound = False  # this view operates outside of a datacenter scope
    order_by_default = order_by_fields = ('name',)

    def __init__(self, request, ns, img, data):
        # ns: node storage (node + zpool); img: a single image or an image
        # queryset (for the many=True code paths); data: request payload.
        super(NodeImageView, self).__init__(request)
        self.ns = ns
        self.img = img
        self.data = data

    @classmethod
    def import_for_vm(cls, request, ns, img, vm):
        """Import image required by VM. Return block_key or raise a FailedDependency API Exception (424)."""
        node = ns.node
        logger.warn('Image %s required for VM %s must be imported to node=%s, zpool=%s', img.name, vm, node, ns.zpool)
        img_ns_status = img.get_ns_status(ns)

        if img_ns_status == img.DELETING:  # Someone is currently removing the image from node pool
            # We can't do anything about this
            raise ExpectationFailed('Required disk image is processed by another task')

        block_key = img.get_block_key(ns)

        if img_ns_status == img.IMPORTING:
            # Import already in progress; the caller just has to wait on it.
            logger.warn('Image %s is being imported to node=%s, zpool=%s; vm_manage will be blocked by block_key=%s',
                        img, node, ns.zpool, block_key)
            return block_key

        # Re-dispatch this request as a POST (import) on this same view.
        req = set_request_method(request, 'POST')

        try:
            res = cls(req, ns, img, None).post()
        except Exception as ex:
            # Translate API exceptions into a response; re-raise anything the
            # exception handler does not understand.
            res = exception_handler(ex, req)
            if res is None:
                raise ex
            res.exception = True

        if res.status_code in (200, 201):
            logger.warn('POST node_image(%s, %s, %s) was successful: %s; task will be blocked by block_key=%s',
                        node.hostname, ns.zpool, img.name, res.data, block_key)
            return block_key
        else:
            logger.error('POST node_image(%s, %s, %s) failed: %s (%s): %s; raising 424 API exception',
                         node.hostname, ns.zpool, img.name, res.status_code, res.status_text, res.data)
            errmsg = get_task_error_message(res.data)
            raise FailedDependency('Cannot import required image %s to node %s (%s: %s)' % (img.name, node.hostname,
                                                                                            res.status_code, errmsg))

    @staticmethod
    def _get_image_vms_map(ns):
        # Map image uuid -> list of {hostname, dc} for every VM on this node
        # storage that uses the image.
        image_vms = {}

        for vm in ns.node.vm_set.select_related('dc').all().order_by('hostname'):
            for img_uuid in vm.get_image_uuids(zpool=ns.zpool):
                image_vms.setdefault(img_uuid, []).append({'hostname': vm.hostname, 'dc': vm.dc.name})

        return image_vms

    @staticmethod
    def _get_image_vms(image_vms, img):
        # Flatten the VM lists of every map entry whose key contains the
        # image's uuid (keys come from vm.get_image_uuids()).
        return [v for k, v in iteritems(image_vms) if img.uuid in k]

    def get(self, many=False):
        """Show image details"""
        img = self.img

        if self.extended:
            serializer = ExtendedNodeImageSerializer
            # Extended output includes the VMs that use each image.
            image_vms = self._get_image_vms_map(self.ns)

            if many:
                for i in img:
                    i.vms = self._get_image_vms(image_vms, i)
            else:
                img.vms = self._get_image_vms(image_vms, img)
        else:
            serializer = NodeImageSerializer

        if many:
            if self.full or self.extended:
                if img:
                    # noinspection PyUnresolvedReferences
                    res = serializer(self.request, img, many=True).data
                else:
                    res = []
            else:
                # default list output is just the image names
                res = list(img.values_list('name', flat=True))
        else:
            # noinspection PyUnresolvedReferences
            res = serializer(self.request, img).data

        return SuccessTaskResponse(self.request, res, dc_bound=False)

    def _check_img(self):
        # The image itself must be OK and not being imported/deleted on this
        # node storage.
        img = self.img

        if img.status != img.OK:
            raise ExpectationFailed('Image status is not OK')

        if img.get_ns_status(self.ns) != img.READY:
            raise ExpectationFailed('Image is not ready')

    def _check_node(self):
        # Import/delete requires the compute node to be online.
        node = self.ns.node

        if node.status != node.ONLINE:
            raise NodeIsNotOperational

    def _check_platform_version(self):
        """Issue #chili-937 & Issue #chili-938"""
        # Enforce the image's min/max node platform version constraints.
        min_version, max_version = self.img.min_platform, self.img.max_platform

        if min_version or max_version:
            node_version = parse(self.ns.node.platform_version)

            if min_version:
                if parse(min_version) > node_version:
                    raise PreconditionRequired('Image requires newer node version')

            if max_version:
                if parse(max_version) < node_version:
                    raise PreconditionRequired('Image requires older node version')

    def _run_execute(self, msg, cmd, status):
        """Run `cmd` on the node via the task queue and mark the image with
        `status` (IMPORTING/DELETING) on this node storage until the callback
        finishes."""
        self._check_img()
        request, ns, img = self.request, self.ns, self.img
        node = ns.node
        detail = 'image=%s' % img.name
        apiview = {
            'view': 'node_image',
            'method': request.method,
            'hostname': node.hostname,
            'zpool': ns.zpool,
            'name': img.name,
        }

        # Set importing/deleting status
        img.set_ns_status(ns, status)

        # Create task
        tid, err = execute(request, ns.storage.owner.id, cmd,
                           tg=TG_DC_UNBOUND,
                           queue=node.image_queue,
                           meta={'output': {'returncode': 'returncode', 'stdout': 'message'},
                                 'replace_stdout': ((node.uuid, node.hostname), (img.uuid, img.name)),
                                 'msg': msg, 'nodestorage_id': ns.id, 'apiview': apiview},
                           callback=('api.node.image.tasks.node_image_cb', {'nodestorage_id': ns.id, 'zpool': ns.zpool,
                                                                            'img_uuid': img.uuid}),
                           lock='node_image ns:%s img:%s' % (ns.id, img.uuid),  # Lock image per node storage
                           expires=IMAGE_TASK_EXPIRES)

        if err:
            # Task creation failed -> roll back the transient image status.
            img.del_ns_status(ns)
            return FailureTaskResponse(request, err, obj=ns)
        else:
            return TaskResponse(request, tid, msg=msg, obj=ns, api_view=apiview, detail=detail, data=self.data)

    def post(self):
        """Import the image onto this node storage (async task)."""
        self._check_node()
        ns, img = self.ns, self.img

        if img.nodestorage_set.filter(id=ns.id).exists():
            # Image is already present on this node storage.
            raise ObjectAlreadyExists(model=Image)

        try:
            self._check_platform_version()
        except PreconditionRequired as exc:
            raise exc
        except Exception as exc:
            # An error in this check should not stop us - fail silently
            logger.exception(exc)

        return self._run_execute(LOG_IMG_IMPORT, 'imgadm import -q -P %s %s 2>&1' % (ns.zpool, img.uuid), img.IMPORTING)

    def delete(self):
        """Delete the image from this node storage (async task); refused while
        any VM on the node still uses it."""
        self._check_node()
        ns, img = self.ns, self.img
        zpool = ns.zpool

        for vm in ns.node.vm_set.all():
            if img.uuid in vm.get_image_uuids(zpool=zpool):
                raise PreconditionRequired(_('Image is used by some VMs'))

        return self._run_execute(LOG_IMG_DELETE, 'imgadm delete -P %s %s 2>&1' % (ns.zpool, img.uuid), img.DELETING)

    def cleanup(self):
        """Delete every READY image on this node storage that no VM uses, by
        dispatching one DELETE node_image API call per unused image."""
        self._check_node()
        ns = self.ns
        zpool = ns.zpool
        used_images = set()

        for vm in ns.node.vm_set.all():
            used_images.update(vm.get_image_uuids(zpool=zpool))

        unused_images = self.img.exclude(uuid__in=used_images)
        res = {}
        node_hostname = ns.node.hostname
        # Imported here to avoid a circular import at module load time.
        from api.node.image.views import node_image

        for img in unused_images:
            if img.get_ns_status(ns) == img.READY:
                r = call_api_view(self.request, 'DELETE', node_image, node_hostname, zpool, img.name, log_response=True)
                res[img.name] = {'status_code': r.status_code, 'response': r.data}

        return SuccessTaskResponse(self.request, res, dc_bound=False)
|
{
"content_hash": "149abf2834f4957b7fc5ca074e10c1db",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 120,
"avg_line_length": 39.130434782608695,
"alnum_prop": 0.5737777777777778,
"repo_name": "erigones/esdc-ce",
"id": "f04a6714ebd9958f46f4d35b3573ae619d22d725",
"size": "9000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/node/image/api_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from future.utils import with_metaclass
from abc import ABCMeta, abstractproperty
import numpy as np
from skbio.util._decorator import classproperty, stable
from ._iupac_sequence import _motifs as parent_motifs
class NucleotideMixin(with_metaclass(ABCMeta, object)):
    """Mixin for adding functionality for working with sequences of nucleotides.

    This is an abstract base class (ABC) that cannot be instantiated.

    Attributes
    ----------
    complement_map

    See Also
    --------
    DNA
    RNA

    """
    # Lazily-built per-class caches. The double-underscore names are
    # name-mangled, so each concrete subclass (DNA, RNA) gets its own slot.
    __complement_lookup = None
    __gc_codes = None

    @classproperty
    def _complement_lookup(cls):
        # A 256-entry uint8 table mapping each extended-ASCII code to the
        # code of its complement; built once per class from `complement_map`.
        # Characters without a complement map to 0.
        if cls.__complement_lookup is not None:
            return cls.__complement_lookup

        lookup = np.zeros(cls._number_of_extended_ascii_codes, dtype=np.uint8)
        for key, value in cls.complement_map.items():
            lookup[ord(key)] = ord(value)
        cls.__complement_lookup = lookup
        return lookup

    @classproperty
    def _gc_codes(cls):
        # ASCII codes of the characters that count toward GC content:
        # G, C and S (the IUPAC degenerate code for "G or C").
        if cls.__gc_codes is None:
            gc_iupac_chars = 'GCS'
            cls.__gc_codes = np.asarray([ord(g) for g in gc_iupac_chars])
        return cls.__gc_codes

    @property
    def _motifs(self):
        # Module-level motif registry (parent IUPAC motifs + the
        # nucleotide-specific ones defined in this module).
        return _motifs

    @abstractproperty
    @classproperty
    @stable(as_of='0.4.0')
    def complement_map(cls):
        """Return mapping of nucleotide characters to their complements.

        Returns
        -------
        dict
            Mapping of each character to its complement.

        Notes
        -----
        Complements cannot be defined for a generic nucleotide sequence because
        the complement of ``A`` is ambiguous. Thanks, nature...

        """
        return set()  # pragma: no cover

    @stable(as_of='0.4.0')
    def complement(self, reverse=False):
        """Return the complement of the nucleotide sequence.

        Parameters
        ----------
        reverse : bool, optional
            If ``True``, return the reverse complement. If positional metadata
            is present, it will be reversed.

        Returns
        -------
        NucleotideMixin
            The (reverse) complement of the nucleotide sequence. The type and
            metadata of the result will be the same as the nucleotide
            sequence. If `reverse` is ``True``, positional metadata
            will be reversed if it is present.

        See Also
        --------
        reverse_complement
        complement_map

        Examples
        --------
        >>> from skbio import DNA
        >>> seq = DNA('TTCATT', positional_metadata={'quality':range(6)})
        >>> seq
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 TTCATT
        >>> seq.complement()
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 AAGTAA
        >>> rc = seq.complement(reverse=True)
        >>> rc
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 AATGAA
        >>> rc.positional_metadata['quality'].values
        array([5, 4, 3, 2, 1, 0])

        """
        # Vectorized lookup: index the per-class complement table with the
        # sequence's raw byte codes.
        result = self._complement_lookup[self._bytes]

        complement = self._to(sequence=result)
        if reverse:
            # Reversing the sequence object also reverses positional metadata.
            complement = complement[::-1]

        return complement

    @stable(as_of='0.4.0')
    def reverse_complement(self):
        """Return the reverse complement of the nucleotide sequence.

        Returns
        -------
        NucleotideMixin
            The reverse complement of the nucleotide sequence. The type and
            metadata of the result will be the same as the nucleotide
            sequence. If positional metadata is present, it will be reversed.

        See Also
        --------
        complement
        is_reverse_complement

        Notes
        -----
        This method is equivalent to ``self.complement(reverse=True)``.

        Examples
        --------
        >>> from skbio import DNA
        >>> seq = DNA('TTCATT',
        ...           positional_metadata={'quality':range(6)})
        >>> seq = seq.reverse_complement()
        >>> seq
        DNA
        -----------------------------
        Positional metadata:
            'quality': <dtype: int64>
        Stats:
            length: 6
            has gaps: False
            has degenerates: False
            has non-degenerates: True
            GC-content: 16.67%
        -----------------------------
        0 AATGAA
        >>> seq.positional_metadata['quality'].values
        array([5, 4, 3, 2, 1, 0])

        """
        return self.complement(reverse=True)

    @stable(as_of='0.4.0')
    def is_reverse_complement(self, other):
        """Determine if a sequence is the reverse complement of this sequence.

        Parameters
        ----------
        other : str, Sequence, or 1D np.ndarray (np.uint8 or '\\|S1')
            Sequence to compare to.

        Returns
        -------
        bool
            ``True`` if `other` is the reverse complement of the nucleotide
            sequence.

        Raises
        ------
        TypeError
            If `other` is a ``Sequence`` object with a different type than the
            nucleotide sequence.

        See Also
        --------
        reverse_complement

        Examples
        --------
        >>> from skbio import DNA
        >>> DNA('TTCATT').is_reverse_complement('AATGAA')
        True
        >>> DNA('TTCATT').is_reverse_complement('AATGTT')
        False
        >>> DNA('ACGT').is_reverse_complement('ACGT')
        True

        """
        other = self._munge_to_sequence(other, 'is_reverse_complement')

        # avoid computing the reverse complement if possible
        if len(self) != len(other):
            return False
        else:
            # we reverse complement ourselves because `other` is a `Sequence`
            # object at this point and we only care about comparing the
            # underlying sequence data
            return self.reverse_complement()._string == other._string

    @stable(as_of='0.4.0')
    def gc_content(self):
        """Calculate the relative frequency of G's and C's in the sequence.

        This includes G, C, and S characters. This is equivalent to calling
        ``gc_frequency(relative=True)``. Note that the sequence will be
        degapped before the operation, so gap characters will not be included
        when calculating the length of the sequence.

        Returns
        -------
        float
            Relative frequency of G's and C's in the sequence.

        See Also
        --------
        gc_frequency

        Examples
        --------
        >>> from skbio import DNA
        >>> DNA('ACGT').gc_content()
        0.5
        >>> DNA('ACGTACGT').gc_content()
        0.5
        >>> DNA('ACTTAGTT').gc_content()
        0.25
        >>> DNA('ACGT--..').gc_content()
        0.5
        >>> DNA('--..').gc_content()
        0

        `S` means `G` or `C`, so it counts:

        >>> DNA('ASST').gc_content()
        0.5

        Other degenerates don't count:

        >>> DNA('RYKMBDHVN').gc_content()
        0.0

        """
        return self.gc_frequency(relative=True)

    @stable(as_of='0.4.0')
    def gc_frequency(self, relative=False):
        """Calculate frequency of G's and C's in the sequence.

        This calculates the minimum GC frequency, which corresponds to IUPAC
        characters G, C, and S (which stands for G or C).

        Parameters
        ----------
        relative : bool, optional
            If False return the frequency of G, C, and S characters (ie the
            count). If True return the relative frequency, ie the proportion
            of G, C, and S characters in the sequence. In this case the
            sequence will also be degapped before the operation, so gap
            characters will not be included when calculating the length of the
            sequence.

        Returns
        -------
        int or float
            Either frequency (count) or relative frequency (proportion),
            depending on `relative`.

        See Also
        --------
        gc_content

        Examples
        --------
        >>> from skbio import DNA
        >>> DNA('ACGT').gc_frequency()
        2
        >>> DNA('ACGT').gc_frequency(relative=True)
        0.5
        >>> DNA('ACGT--..').gc_frequency(relative=True)
        0.5
        >>> DNA('--..').gc_frequency(relative=True)
        0

        `S` means `G` or `C`, so it counts:

        >>> DNA('ASST').gc_frequency()
        2

        Other degenerates don't count:

        >>> DNA('RYKMBDHVN').gc_frequency()
        0

        """
        # Histogram of byte codes, then sum the G/C/S bins.
        counts = np.bincount(self._bytes,
                             minlength=self._number_of_extended_ascii_codes)
        gc = counts[self._gc_codes].sum()
        if relative:
            seq = self.degap()
            if len(seq) != 0:
                # true division (``from __future__ import division`` at the
                # top of this module); degapped length is the denominator
                gc /= len(seq)
        return gc
# Start from the parent (IUPAC sequence) motifs and register the
# nucleotide-specific motifs below. NOTE(review): `parent_motifs` appears to
# be a registry whose call form registers a finder under the given name --
# confirm against skbio's motif registry implementation.
_motifs = parent_motifs.copy()


@_motifs("purine-run")
def _motif_purine_run(sequence, min_length, ignore):
    """Identifies purine runs"""
    # A, G and R (IUPAC degenerate code for A-or-G) count as purines.
    return sequence.find_with_regex("([AGR]{%d,})" % min_length,
                                    ignore=ignore)


@_motifs("pyrimidine-run")
def _motif_pyrimidine_run(sequence, min_length, ignore):
    """Identifies pyrimidine runs"""
    # C, T, U and Y (IUPAC degenerate code for C-or-T/U) count as pyrimidines.
    return sequence.find_with_regex("([CTUY]{%d,})" % min_length,
                                    ignore=ignore)
|
{
"content_hash": "6aab50bb1ef86ff7b92c757045b7fb41",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 79,
"avg_line_length": 28.390581717451525,
"alnum_prop": 0.5252219728754025,
"repo_name": "demis001/scikit-bio",
"id": "9b5c75cf12692ef06f0dc9a12454659a572ca1df",
"size": "10603",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "skbio/sequence/_nucleotide_mixin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "259"
},
{
"name": "Makefile",
"bytes": "585"
},
{
"name": "Python",
"bytes": "1983954"
}
],
"symlink_target": ""
}
|
import os.path
import uuid
import mock
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
from solum.worker.handlers import shell_nobuild as shell_handler
class HandlerTest(base.BaseTestCase):
    """Tests for the shell_nobuild worker handler's build() flow."""

    def setUp(self):
        super(HandlerTest, self).setUp()
        self.ctx = utils.dummy_context()

    # Notice most of these mocks do not modify shell_nobuild, but shell.
    # (shell_nobuild.Handler inherits/reuses the shell handler's machinery.)
    # Mock arguments are injected bottom-up: the last decorator maps to the
    # first mock parameter.
    @mock.patch('solum.worker.handlers.shell.Handler._get_environment')
    @mock.patch('solum.objects.registry')
    @mock.patch('subprocess.Popen')
    @mock.patch('solum.conductor.api.API.build_job_update')
    @mock.patch('solum.worker.handlers.shell.update_assembly_status')
    @mock.patch('solum.worker.handlers.shell_nobuild.update_assembly_status')
    def test_unittest_and_build(self, mock_a_update_nb, mock_a_update,
                                mock_b_update, mock_popen, mock_registry,
                                mock_get_env):
        handler = shell_handler.Handler()
        fake_assembly = fakes.FakeAssembly()
        fake_glance_id = str(uuid.uuid4())
        mock_registry.Assembly.get_by_id.return_value = fake_assembly
        handler._update_assembly_status = mock.MagicMock()
        # Unit-test script exits 0 (success) and reports a created image id.
        mock_popen.return_value.wait.return_value = 0
        mock_popen.return_value.communicate.return_value = [
            'foo\ncreated_image_id=%s' % fake_glance_id, None]
        test_env = {'PATH': '/bin'}
        mock_get_env.return_value = test_env
        handler.build(self.ctx, 5, 'git://example.com/foo', 'new_app',
                      '1-2-3-4', 'heroku',
                      'docker', 44, 'faketests')
        # Expected path of the unittest-app helper script, resolved relative
        # to the project root (four directories above this test module).
        proj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '..', '..', '..', '..'))
        util_dir = os.path.join(proj_dir, 'contrib', 'lp-cedarish', 'docker')
        u_script = os.path.join(util_dir, 'unittest-app')

        # Only the unit-test script should have been run -- no build step.
        expected = [
            mock.call([u_script, 'git://example.com/foo', 'master',
                       self.ctx.tenant, 'faketests'], env=test_env)]
        self.assertEqual(expected, mock_popen.call_args_list)

        # The UNIT_TESTING update happens from shell...
        expected = [mock.call(self.ctx, 44, 'UNIT_TESTING')]
        self.assertEqual(expected, mock_a_update.call_args_list)

        # ...but the READY update happens in shell_nobuild.
        expected = [mock.call(self.ctx, 44, 'READY')]
        self.assertEqual(expected, mock_a_update_nb.call_args_list)

    @mock.patch('solum.worker.handlers.shell.Handler._get_environment')
    @mock.patch('subprocess.Popen')
    @mock.patch('solum.worker.handlers.shell.update_assembly_status')
    @mock.patch('solum.objects.registry')
    def test_unittest_no_build(self, mock_registry, mock_a_update,
                               mock_popen, mock_get_env):
        handler = shell_handler.Handler()
        handler._update_assembly_status = mock.MagicMock()
        fake_assembly = fakes.FakeAssembly()
        mock_registry.Assembly.get_by_id.return_value = fake_assembly
        # Unit-test script exits non-zero -> the build must not proceed.
        mock_popen.return_value.wait.return_value = 1
        test_env = {'PATH': '/bin'}
        mock_get_env.return_value = test_env
        handler.build(self.ctx, 5, 'git://example.com/foo', 'new_app',
                      '1-2-3-4', 'heroku',
                      'docker', 44, 'faketests')
        proj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '..', '..', '..', '..'))
        util_dir = os.path.join(proj_dir, 'contrib', 'lp-cedarish', 'docker')
        u_script = os.path.join(util_dir, 'unittest-app')

        # The test script ran once; failure status transitions were recorded.
        expected = [
            mock.call([u_script, 'git://example.com/foo', 'master',
                       self.ctx.tenant, 'faketests'], env=test_env)]
        self.assertEqual(expected, mock_popen.call_args_list)
        expected = [mock.call(self.ctx, 44, 'UNIT_TESTING'),
                    mock.call(self.ctx, 44, 'UNIT_TESTING_FAILED')]
        self.assertEqual(expected, mock_a_update.call_args_list)
class TestNotifications(base.BaseTestCase):
    """Tests for the module-level update_assembly_status helper."""

    def setUp(self):
        super(TestNotifications, self).setUp()
        self.ctx = utils.dummy_context()
        self.db = self.useFixture(utils.Database())

    @mock.patch('solum.objects.registry')
    def test_update_assembly_status(self, mock_registry):
        # The helper should look the assembly up by id, set its status,
        # and save it with the same context.
        mock_assembly = mock.MagicMock()
        mock_registry.Assembly.get_by_id.return_value = mock_assembly
        shell_handler.update_assembly_status(self.ctx, '1234',
                                             'BUILDING')
        mock_registry.Assembly.get_by_id.assert_called_once_with(self.ctx,
                                                                 '1234')
        mock_assembly.save.assert_called_once_with(self.ctx)
        self.assertEqual(mock_assembly.status, 'BUILDING')

    @mock.patch('solum.objects.registry')
    def test_update_assembly_status_pass(self, mock_registry):
        # A None assembly id must be a no-op: the registry is not touched.
        shell_handler.update_assembly_status(self.ctx, None,
                                             'BUILDING')
        self.assertEqual(mock_registry.call_count, 0)
class TestBuildCommand(base.BaseTestCase):
    """Scenario-driven tests for Handler._get_build_command.

    Each `scenarios` entry injects source_format, image_format,
    base_image_id and expect as instance attributes (testscenarios-style
    — presumably expanded by the test runner; verify against base class),
    and test_build_cmd runs once per entry.
    """

    scenarios = [
        ('docker',
         dict(source_format='heroku', image_format='docker',
              base_image_id='auto',
              expect='lp-cedarish/docker/build-app')),
        ('vmslug',
         dict(source_format='heroku', image_format='qcow2',
              base_image_id='auto',
              expect='lp-cedarish/vm-slug/build-app')),
        ('dockerfile',
         dict(source_format='dockerfile', image_format='docker',
              base_image_id='auto',
              expect='lp-dockerfile/docker/build-app')),
        ('dib',
         dict(source_format='dib', image_format='qcow2',
              base_image_id='xyz',
              expect='diskimage-builder/vm-slug/build-app'))]

    def test_build_cmd(self):
        ctx = utils.dummy_context()
        handler = shell_handler.Handler()
        cmd = handler._get_build_command(ctx,
                                         'http://example.com/a.git',
                                         'testa',
                                         self.base_image_id,
                                         self.source_format,
                                         self.image_format)
        # Command layout: [script, git_url, app_name, tenant, base_image].
        self.assertIn(self.expect, cmd[0])
        self.assertEqual('http://example.com/a.git', cmd[1])
        self.assertEqual('testa', cmd[2])
        self.assertEqual(ctx.tenant, cmd[3])
        # 'auto' qcow2 builds fall back to the 'cedarish' base image.
        if self.base_image_id == 'auto' and self.image_format == 'qcow2':
            self.assertEqual('cedarish', cmd[4])
        else:
            self.assertEqual(self.base_image_id, cmd[4])
|
{
"content_hash": "b367de403ffbe258dc2761e43ce97e0e",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 77,
"avg_line_length": 43.90909090909091,
"alnum_prop": 0.5718722271517303,
"repo_name": "gilbertpilz/solum",
"id": "31c6d515ade443c4be719a9690d593257d9eb56c",
"size": "7347",
"binary": false,
"copies": "1",
"ref": "refs/heads/camp/item-1",
"path": "solum/tests/worker/handlers/test_shell_nobuild.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "75"
},
{
"name": "Python",
"bytes": "888136"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "37758"
}
],
"symlink_target": ""
}
|
import argparse
import logging
import sys
from . import kazoocli
from .version import description
def main(args=None):
    """Parse command-line options and launch the interactive kazoocli shell.

    Args:
        args: argument list to parse; when None, argparse falls back to
            sys.argv[1:].

    Returns:
        Process exit code: 0 on a clean session, 1 on error (the error is
        printed unless debug logging is enabled, in which case it is
        re-raised for a full traceback).
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('server', nargs='?',
                        help='The ZooKeeper server to connect to',
                        default='localhost:2181')
    parser.add_argument('-D', '--debug', action='store_const',
                        const=logging.DEBUG, default=logging.ERROR,
                        help='Enable debug logging of Kazoo')
    parser.add_argument('-t', '--timeout', default=3, type=int,
                        help='Set connection timeout (default: 3 seconds)')
    options = parser.parse_args(args=args)
    logging.basicConfig(stream=sys.stderr, level=options.debug)
    try:
        kazoocli.KazooCli(options.server, options.timeout)
        return 0
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt (Ctrl-C would print the exception object
        # and return 1 instead of interrupting). Catch Exception only.
        if options.debug == logging.DEBUG:
            raise
        print(sys.exc_info()[1])
        return 1
if __name__ == '__main__':
    # Script entry point: delegate to main() and propagate its exit code.
    sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "78a91abcfe3ff3a800587eb110de070f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 32.03030303030303,
"alnum_prop": 0.5988647114474929,
"repo_name": "mpetazzoni/kazoocli",
"id": "039be5c5d129ddd0824a94a28d6dd803867ef66b",
"size": "1148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kazoocli/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14059"
}
],
"symlink_target": ""
}
|
import time
import sys
import heapq
from gridmap import GridNode, GridMap
from random import random
class Algo():
    """Base class for the grid path-finding algorithms.

    Holds the grid, the nodes visited by the last search, the resulting
    path and the elapsed search time, plus shared helpers for reporting
    and neighbor expansion.
    """

    def __init__(self, grid):
        # Nodes expanded by the search, in visit order.
        self.visited = []
        # Wall-clock seconds of the last calc_path() run; -1 until a run.
        self.calctime = -1
        self.grid = grid
        # Nodes of the reconstructed start -> goal path.
        self.result = []

    def print_result(self, printmap):
        """Print search statistics (and optionally the grid) to stdout."""
        # print(...) with one argument behaves identically under Python 2's
        # print statement and Python 3's print function.
        print('Number of visited nodes: ' + str(len(self.visited)))
        print('Number of nodes in result path: ' + str(len(self.result)))
        print('Calculate time: ' + str(self.calctime) + ' sec')
        if printmap:
            self.grid.print_grid()

    def save_result(self, printmap, name='result.txt'):
        """Save search statistics (and optionally the grid) to `name`."""
        f = open(name, 'w')
        # BUG FIX: the original wrote to the undefined name `filename`,
        # which raised NameError; write to the file object opened above.
        f.write('Number of visited nodes: ' + str(len(self.visited)))
        f.write('\n')
        f.write('Calculate time: ' + str(self.calctime) + '\n')
        f.close()
        if printmap:
            self.grid.save_grid(name)

    def get_neighbors(self, vertex):
        """Return the in-bounds neighbor nodes of `vertex`.

        Uses 8-connectivity when the grid allows diagonal moves,
        4-connectivity otherwise.
        """
        if self.grid.can_diagonal_move:
            candidates = [(vertex.x - 1, vertex.y - 1),
                          (vertex.x - 1, vertex.y),
                          (vertex.x - 1, vertex.y + 1),
                          (vertex.x, vertex.y - 1),
                          (vertex.x, vertex.y + 1),
                          (vertex.x + 1, vertex.y - 1),
                          (vertex.x + 1, vertex.y),
                          (vertex.x + 1, vertex.y + 1)]
        else:
            candidates = [(vertex.x - 1, vertex.y),
                          (vertex.x, vertex.y + 1),
                          (vertex.x, vertex.y - 1),
                          (vertex.x + 1, vertex.y)]
        # Keep only coordinates inside the grid; x indexes columns and
        # y indexes rows, while matrix is indexed [row][column].
        candidates = [(x, y) for (x, y) in candidates
                      if x >= 0 and y >= 0
                      and x < self.grid.col
                      and y < self.grid.row]
        result = []
        for i, j in candidates:
            result.append(self.grid.matrix[j][i])
        return result
class AStar(Algo):
    # A* search: expands nodes in increasing f = g + h order, where g is
    # the exact distance from the start and h a Manhattan heuristic.

    def calc_path(self, debug, filename):
        """Compute a path from grid.start to grid.goal with A*.

        Args:
            debug: if True, write a trace of the search to `filename`.
            filename: open writable file object for the debug trace
                (only touched when debug is True).

        Side effects: fills self.visited, self.result and self.calctime,
        and sets visited/in_result/previous flags on the grid nodes.
        """
        if not self.grid.start_set:
            print 'please set start and goal, or randomize grid'
            return
        if debug:
            filename.write('\nin A*\n')
        t = time.time()  # saves start time
        start = self.grid.start
        start.mindistance = 0.0
        start.h_value = self.heuristic(start)
        # NOTE(review): heapq orders nodes via GridNode comparison --
        # presumably on h_value; confirm GridNode defines it.
        openset = [start]
        closedset = set()
        while openset:
            u = heapq.heappop(openset)  # pop vertex whose h_value, g + h, is minimum
            self.visited.append(u)
            u.visited = True
            closedset.add(u)  # u is calculated completely, so add it in closed set
            if debug:
                filename.write('u: ' + str(u.x) + ', ' + str(u.y))
                filename.write(' ' + str(time.time()) + '\n')
            if u == self.grid.goal:  # goal
                break
            for target in self.get_neighbors(u):
                if debug:
                    filename.write('target: ' + str(target.x))
                    filename.write(', ' + str(target.y))
                    filename.write(' ' + str(time.time()) + '\n')
                weight = 1.0  # default weight is 1.0
                g = weight + u.mindistance  # g is real distance
                h = self.heuristic(target)  # h is estimated value from target to goal
                f = g + h
                if not target.is_obs:  # cannot go through obstacles
                    if target.isopen or target in closedset:
                        if target.mindistance > g:  # need to be update
                            # NOTE(review): mutating h_value of a node that is
                            # already inside the heap does not re-sift it, so
                            # the heap order can go stale here.
                            target.h_value = f
                            target.mindistance = g
                            target.previous = u
                    else:  # new vertex
                        target.isopen = True
                        target.h_value = f
                        target.mindistance = g
                        target.previous = u
                        heapq.heappush(openset, target)
        self.calctime = time.time() - t
        # reconstruct path
        # follows vertices' previous
        u = self.grid.goal
        while u:
            u.in_result = True
            self.result.append(u)
            u = u.previous

    def heuristic(self, curr):
        "estimate the distance from curr to goal"
        # Manhattan distance to the goal.
        return (abs(curr.x - self.grid.goal.x)
                + abs(curr.y - self.grid.goal.y))
class Dijkstra(Algo):
    # Uniform-cost search: expands nodes in increasing start-distance
    # order; h_value is kept equal to mindistance for the heap ordering.

    def calc_path(self, debug, filename):
        """Compute a shortest path from grid.start to grid.goal.

        `debug`/`filename` behave as in AStar.calc_path: `filename` is an
        open writable file used only when debug is True.
        """
        if not self.grid.start_set:
            print 'please set start and goal, or randomize grid'
            return
        if debug:
            filename.write('\nin Dijkstra\n')
        t = time.time()  # saves start time
        start = self.grid.start
        start.mindistance = 0.0
        start.h_value = 0.0
        priority_queue = [start]
        while priority_queue:
            u = heapq.heappop(priority_queue)  # pop vertex whose mindistance is minimum
            self.visited.append(u)
            u.visited = True
            if debug:
                filename.write('u: ' + str(u.x) + ', ' + str(u.y))
                filename.write(' ' + str(time.time()) + '\n')
            if u == self.grid.goal:  # goal
                break
            for target in self.get_neighbors(u):
                if debug:
                    filename.write('target: ' + str(target.x))
                    filename.write(', ' + str(target.y))
                    filename.write(' ' + str(time.time()) + '\n')
                weight = 1.0
                g = weight + u.mindistance
                if not target.is_obs and g < target.mindistance:  # don't care obstacles, and if g < target.mindistance then update needed
                    # NOTE(review): membership test and remove() on a list
                    # are O(n), and removing from the middle can break the
                    # heap invariant (no heapify before the next push).
                    if target in priority_queue:
                        priority_queue.remove(target)
                    target.mindistance = g
                    target.h_value = g
                    target.previous = u
                    heapq.heappush(priority_queue, target)
        self.calctime = time.time() - t
        # reconstruct path
        # following previous
        u = self.grid.goal
        while u:
            u.in_result = True
            self.result.append(u)
            u = u.previous
class BestFirst(Algo):
    # Greedy best-first search: expands nodes by heuristic value only, so
    # the resulting path is not guaranteed to be shortest.

    def calc_path(self, debug, filename):
        """Greedy search from grid.start to grid.goal.

        `debug` and `filename` are accepted for interface parity with the
        other algorithms, but no debug trace is written here.
        """
        if not self.grid.start_set:
            print 'please set start and goal, or randomize grid'
            return
        t = time.time()
        start = self.grid.start
        start.mindistance = 0.0
        start.h_value = self.heuristic(start)
        priority_queue = [start]
        closed = set()
        while priority_queue:
            u = heapq.heappop(priority_queue)  # pop vertex whose heuristic(vertex) is minimum
            self.visited.append(u)
            closed.add(u)
            if u == self.grid.goal:
                break
            for target in self.get_neighbors(u):
                h = self.heuristic(target)
                if not target.is_obs and target not in closed:  # not obstacle, not visited
                    if target in priority_queue:
                        priority_queue.remove(target)
                    target.mindistance = h
                    target.h_value = h
                    target.previous = u
                    heapq.heappush(priority_queue, target)
        self.calctime = time.time() - t
        # Reconstruct the path by walking the `previous` chain from goal.
        u = self.grid.goal
        while u:
            u.in_result = True
            self.result.append(u)
            u = u.previous

    def heuristic(self, curr):
        # Manhattan distance from `curr` to the goal.
        return abs(curr.x - self.grid.goal.x) + abs(curr.y - self.grid.goal.y)
def usage():
    """Print command-line usage for this script to stdout."""
    # NOTE(review): main() also checks a '-s' flag that is not listed here.
    print """USAGE:
python %s column row [flags]
flags:
-p : print the gridmap
--debug or -d : debug mode
""" % sys.argv[0]
def make_random_pick(row, col):
    """Return a list of (i, j) cell coordinates.

    Each cell of the row x col grid is picked independently with
    probability 0.2. Callers use the first pick as the start, the last
    as the goal and the middle picks as obstacles.
    """
    result = []
    for i in xrange(row):
        for j in xrange(col):
            if random() < 0.2:
                result.append((i, j))
    return result
def main():
if len(sys.argv) < 2:
usage()
exit(0)
printmap = False
savefile = False
debugmode = False
f = None
if '-p' in sys.argv:
printmap = True
if '-s' in sys.argv:
savefile = True
if '--debug' in sys.argv or '-d' in sys.argv:
debugmode = True
f = open('debug.log', 'w')
ran_pick = make_random_pick(int(sys.argv[1]), int(sys.argv[2]))
bfs = BestFirst(GridMap(int(sys.argv[1]), int(sys.argv[2])))
dijk = Dijkstra(GridMap(int(sys.argv[1]), int(sys.argv[2])))
astar = AStar(GridMap(int(sys.argv[1]), int(sys.argv[2])))
while len(ran_pick) < 2:
ran_pick = make_random_pick(int(sys.argv[1]), int(sys.argv[2]))
bfs.grid.put_multiple_obs(ran_pick[1:-2])
dijk.grid.put_multiple_obs(ran_pick[1:-2])
astar.grid.put_multiple_obs(ran_pick[1:-2])
bfs.grid.set_start(ran_pick[0][0], ran_pick[0][1])
dijk.grid.set_start(ran_pick[0][0], ran_pick[0][1])
astar.grid.set_start(ran_pick[0][0], ran_pick[0][1])
bfs.grid.set_goal(ran_pick[-1][0], ran_pick[-1][1])
dijk.grid.set_goal(ran_pick[-1][0], ran_pick[-1][1])
astar.grid.set_goal(ran_pick[-1][0], ran_pick[-1][1])
print 'Calculating by Best-First-Search...'
bfs.calc_path(debugmode, f)
print 'Finish!'
print 'Calculating by Dijkstra algorithm...'
dijk.calc_path(debugmode, f)
print 'Finish!'
print 'Calculating by A* algorithm...'
astar.calc_path(debugmode, f)
print 'Finish!'
print
print 'Result of Best-First-Search'
bfs.print_result(printmap)
print
print 'Result of Dijkstra algorithm'
dijk.print_result(printmap)
print
print 'Result of A* algorithm'
astar.print_result(printmap)
if debugmode:
f.close()
if __name__ == '__main__':
main()
|
{
"content_hash": "313d8034d58b4e75974a7aabd16cdbb5",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 138,
"avg_line_length": 32.21172638436482,
"alnum_prop": 0.5022752553342097,
"repo_name": "adiectio/astar",
"id": "29edd100f1175f297fdb1582c09358da2859351c",
"size": "9977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14178"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.test import TransactionTestCase
from aldryn_newsblog.cms_appconfig import NewsBlogConfig
from aldryn_newsblog.models import Article
from aldryn_people.models import Person
from . import NewsBlogTestsMixin
class AdminTest(NewsBlogTestsMixin, TransactionTestCase):
    """Admin integration test for the Article add view."""

    def test_admin_owner_default(self):
        # The add form should pre-select the requesting user as the owner.
        from django.contrib import admin
        admin.autodiscover()
        # since we now have data migration to create the default
        # NewsBlogConfig (if migrations were not faked, django >1.7)
        # we need to delete one of configs to be sure that it is pre selected
        # in the admin view.
        if NewsBlogConfig.objects.count() > 1:
            # delete the app config that was created during test set up.
            NewsBlogConfig.objects.filter(namespace='NBNS').delete()
        user = self.create_user()
        user.is_superuser = True
        user.save()
        # A Person linked to the user is created so the owner field can
        # render the user's full name.
        Person.objects.create(user=user, name=u' '.join(
            (user.first_name, user.last_name)))
        admin_inst = admin.site._registry[Article]
        self.request = self.get_request('en')
        self.request.user = user
        self.request.META['HTTP_HOST'] = 'example.com'
        response = admin_inst.add_view(self.request)
        option = '<option value="1" selected="selected">%s</option>'
        # Both the username and the full-name renderings must appear as
        # the pre-selected option in the form.
        self.assertContains(response, option % user.username)
        self.assertContains(response, option % user.get_full_name())
|
{
"content_hash": "27f2ceb91b1488c84195d6e31e82d032",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 39.28947368421053,
"alnum_prop": 0.6751507032819826,
"repo_name": "czpython/aldryn-newsblog",
"id": "77ce30cb9846bb907d1268b75013945d7a09afcb",
"size": "1518",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aldryn_newsblog/tests/test_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "27023"
},
{
"name": "JavaScript",
"bytes": "32640"
},
{
"name": "Python",
"bytes": "947522"
},
{
"name": "Shell",
"bytes": "216"
}
],
"symlink_target": ""
}
|
"""
Local support for Insteon.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/insteon_local/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD, CONF_USERNAME, CONF_HOST, CONF_PORT, CONF_TIMEOUT)
import homeassistant.helpers.config_validation as cv
# Version-pinned dependency installed by Home Assistant at setup time.
REQUIREMENTS = ['insteonlocal==0.48']

_LOGGER = logging.getLogger(__name__)

# Default Insteon Hub HTTP port and connection timeout (seconds).
DEFAULT_PORT = 25105
DEFAULT_TIMEOUT = 10

DOMAIN = 'insteon_local'

# Validates the `insteon_local:` section of configuration.yaml:
# host/username/password are required; port and timeout are optional.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the Insteon Hub component.

    This will automatically import associated lights.

    Connects to the hub, verifies the connection with a buffer-status
    read, and stores the hub object in hass.data for platforms to use.

    Returns:
        True on success, False if the hub is unreachable or rejects the
        credentials.
    """
    from insteonlocal.Hub import Hub

    conf = config[DOMAIN]
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)
    host = conf.get(CONF_HOST)
    port = conf.get(CONF_PORT)
    timeout = conf.get(CONF_TIMEOUT)

    # BUG FIX: initialized before the try block so the RequestException
    # handler cannot hit an UnboundLocalError when Hub() itself raises.
    insteonhub = None
    try:
        insteonhub = Hub(host, username, password, port, timeout, _LOGGER)
        # Check for successful connection
        insteonhub.get_buffer_status()
    except requests.exceptions.ConnectTimeout:
        # BUG FIX (message): the original concatenated strings rendered
        # "insteon_local.Could not connect" with no separating space.
        _LOGGER.error("Error on insteon_local. "
                      "Could not connect. Check config", exc_info=True)
        return False
    except requests.exceptions.ConnectionError:
        _LOGGER.error("Error on insteon_local. Could not connect. "
                      "Check config", exc_info=True)
        return False
    except requests.exceptions.RequestException:
        if insteonhub is not None and insteonhub.http_code == 401:
            _LOGGER.error("Bad user/pass for insteon_local hub")
        else:
            _LOGGER.error("Error on insteon_local hub check", exc_info=True)
        return False

    hass.data['insteon_local'] = insteonhub
    return True
|
{
"content_hash": "4172a907f865adbd9f64e5bf539c0d9c",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 76,
"avg_line_length": 30.507042253521128,
"alnum_prop": 0.669898430286242,
"repo_name": "shaftoe/home-assistant",
"id": "1f700b5ff3e95c5ffb105c08fd975ca0c9c83bbd",
"size": "2166",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/insteon_local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1584258"
},
{
"name": "Python",
"bytes": "5479272"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15017"
}
],
"symlink_target": ""
}
|
# Version components of this scipy.special release.
major = 0
minor = 4
micro = 9
release_level = 'beta'

# __svn_version__.py is generated at build time and exposes the svn
# revision this tree was built from.
from __svn_version__ import svn_version

# Full version string, e.g. "0.4.9_<svn revision>".
special_version = '%(major)d.%(minor)d.%(micro)d_%(svn_version)s'\
        % (locals ())
|
{
"content_hash": "c500b0ba7ff3852daaa8804ffde0d1ae",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 66,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.5824742268041238,
"repo_name": "scipy/scipy-svn",
"id": "f6e06061f4efc1fa507040da3bf358ded971d779",
"size": "194",
"binary": false,
"copies": "59",
"ref": "refs/heads/master",
"path": "scipy/special/special_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8532454"
},
{
"name": "C++",
"bytes": "6602032"
},
{
"name": "FORTRAN",
"bytes": "5895476"
},
{
"name": "Objective-C",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "4753723"
},
{
"name": "Shell",
"bytes": "1742"
}
],
"symlink_target": ""
}
|
from requestbuilder import Arg
from requestbuilder.exceptions import ArgumentError
from requestbuilder.mixins import TabifyingMixin
from euca2ools.commands.ec2 import EC2Request
class ModifyInstanceTypeAttribute(EC2Request, TabifyingMixin):
    """Request that changes a Eucalyptus instance type's CPU, disk or
    memory allocation, or resets the type to its default configuration."""

    DESCRIPTION = '[Eucalyptus cloud admin only] Modify an instance type'
    ARGS = [Arg('Name', metavar='INSTANCETYPE',
                help='name of the instance type to modify (required)'),
            Arg('-c', '--cpus', dest='Cpu', metavar='COUNT', type=int,
                help='number of virtual CPUs to allocate to each instance'),
            Arg('-d', '--disk', dest='Disk', metavar='GiB', type=int,
                help='amount of instance storage to allow each instance'),
            Arg('-m', '--memory', dest='Memory', metavar='MiB', type=int,
                help='amount of RAM to allocate to each instance'),
            Arg('--reset', dest='Reset', action='store_true',
                help='reset the instance type to its default configuration')]

    # noinspection PyExceptionInherit
    def configure(self):
        # Validate argument combinations after the standard EC2 configure.
        EC2Request.configure(self)
        if (self.args.get('Reset') and
                any(self.args.get(attr) is not None for attr in
                    ('Cpu', 'Disk', 'Memory'))):
            # Basically, reset is mutually exclusive with everything else.
            raise ArgumentError('argument --reset may not be used with '
                                'instance type attributes')

    def print_result(self, result):
        # Print the modified instance type as one tab-separated row.
        newtype = result.get('instanceType', {})
        print self.tabify(('INSTANCETYPE', newtype.get('name'),
                           newtype.get('cpu'), newtype.get('memory'),
                           newtype.get('disk')))
|
{
"content_hash": "87807cacc5b2794abefc7357f8732387",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 49.74285714285714,
"alnum_prop": 0.6042504307869041,
"repo_name": "nagyistoce/euca2ools",
"id": "11434a35ba6bc92565b1a747f84e40c3f7d7ff2d",
"size": "3088",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "euca2ools/commands/ec2/modifyinstancetypeattribute.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1230322"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
}
|
# Demonstrates try/except control flow: the KeyError raised inside the
# try block skips the remaining statements and jumps to the handler.
a = {}

print 'Before error handler section'

try:
    print 'one'
    # Raises KeyError: 'not_here' was never added to the dict.
    print a['not_here']
    # Never reached -- the exception aborts the try block above.
    print 'two'
except KeyError:
    print 'There was a key exception'

# Execution resumes here after the handler.
print 'three'
|
{
"content_hash": "743cff2f8cd6425e2b525d3d7df88fd0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 37,
"avg_line_length": 14.75,
"alnum_prop": 0.6384180790960452,
"repo_name": "cstamm/PythonNetworking",
"id": "da2af4459346bf9331b08197c1157f9c73446186",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_course/eh_try_except_1.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20445"
}
],
"symlink_target": ""
}
|
"""This module defines the vtgate client interface.
"""
from vtdb import vtgate_cursor
# Registry mapping each protocol name to its connection class.
vtgate_client_conn_classes = {}


def register_conn_class(protocol, c):
    """Record a VTGateClient implementation for a protocol.

    Implementations call this at import time so connect() can later look
    the class up by its protocol name.

    Args:
      protocol: short string to document the protocol.
      c: class to register.
    """
    vtgate_client_conn_classes[protocol] = c
def connect(protocol, vtgate_addrs, timeout, *pargs, **kargs):
    """Return a dialed VTGateClient connection to a vtgate server.

    FIXME(alainjobart): exceptions raised are not consistent.

    Args:
      protocol: the registered protocol to use.
      vtgate_addrs: single or multiple vtgate server addresses to connect to.
        Which address is actually used depends on the load balancing
        capabilities of the underlying protocol used.
      timeout: connection timeout, float in seconds.
      *pargs: passed to the registered protocol __init__ method.
      **kargs: passed to the registered protocol __init__ method.

    Returns:
      A dialed VTGateClient.

    Raises:
      dbexceptions.OperationalError: if we are unable to establish the
        connection (for instance, no available instance).
      dbexceptions.Error: if vtgate_addrs have the wrong type.
      Exception: if the protocol is unknown, or vtgate_addrs are malformed.
    """
    conn_class = vtgate_client_conn_classes.get(protocol)
    if conn_class is None:
        raise Exception('Unknown vtclient protocol', protocol)
    connection = conn_class(vtgate_addrs, timeout, *pargs, **kargs)
    connection.dial()
    return connection
class VTGateClient(object):
    """VTGateClient is the interface for the vtgate client implementations.

    All implementations must implement all these methods.
    If something goes wrong with the connection, this object will be thrown
    out.

    FIXME(alainjobart) transactional state (the Session object) is currently
    maintained by this object. It should be maintained by the cursor, and
    just returned / passed in with every method that makes sense.

    FIXME(alainjobart) streaming state is also maintained by this object.
    It should also be maintained by the cursor only.
    """

    # NOTE: every method body below is a stub (`pass`); concrete behavior
    # lives in the per-protocol implementations registered via
    # register_conn_class.

    def __init__(self, addr, timeout):
        """Initialize a vtgate connection.

        Args:
          addr: server address. Can be protocol dependent.
          timeout: connection timeout (float, in seconds).
        """
        pass

    def dial(self):
        """Dial to the server.

        If successful, call close() to close the connection.
        """
        pass

    def close(self):
        """Close the connection.

        This object may be re-used again by calling dial().
        """
        pass

    def is_closed(self):
        """Checks the connection status.

        Returns:
          True if this connection is closed.
        """
        pass

    def cursor(self, *pargs, **kwargs):
        """Creates a cursor instance associated with this connection.

        Args:
          *pargs: passed to the cursor constructor.
          **kwargs: passed to the cursor constructor.

        Returns:
          A new cursor to use on this connection.
        """
        # 'cursorclass' is consumed here and not forwarded to the cursor;
        # it defaults to vtgate_cursor.VTGateCursor.
        cursorclass = None
        if 'cursorclass' in kwargs:
            cursorclass = kwargs['cursorclass']
            del kwargs['cursorclass']
        if cursorclass is None:
            cursorclass = vtgate_cursor.VTGateCursor
        return cursorclass(self, *pargs, **kwargs)

    def begin(self):
        """Starts a transaction.

        FIXME(alainjobart): instead of storing the Session as member
        variable, should return it and let the cursor store it.

        Raises:
          dbexceptions.TimeoutError: for connection timeout.
          dbexceptions.RequestBacklog: the server is overloaded, and this
            query is asked to back off.
          dbexceptions.IntegrityError: integrity of an index would not be
            guaranteed with this statement.
          dbexceptions.DatabaseError: generic database error.
          dbexceptions.ProgrammingError: the supplied statements are
            invalid, this is probably an error in the code.
          dbexceptions.FatalError: this query should not be retried.
        """
        pass

    def commit(self):
        """Commits the current transaction.

        FIXME(alainjobart): should take the session in.

        Raises:
          dbexceptions.TimeoutError: for connection timeout.
          dbexceptions.RequestBacklog: the server is overloaded, and this
            query is asked to back off.
          dbexceptions.IntegrityError: integrity of an index would not be
            guaranteed with this statement.
          dbexceptions.DatabaseError: generic database error.
          dbexceptions.ProgrammingError: the supplied statements are
            invalid, this is probably an error in the code.
          dbexceptions.FatalError: this query should not be retried.
        """
        pass

    def rollback(self):
        """Rolls the current transaction back.

        FIXME(alainjobart): should take the session in.

        Raises:
          dbexceptions.TimeoutError: for connection timeout.
          dbexceptions.RequestBacklog: the server is overloaded, and this
            query is asked to back off.
          dbexceptions.IntegrityError: integrity of an index would not be
            guaranteed with this statement.
          dbexceptions.DatabaseError: generic database error.
          dbexceptions.ProgrammingError: the supplied statements are
            invalid, this is probably an error in the code.
          dbexceptions.FatalError: this query should not be retried.
        """
        pass

    def _execute(self, sql, bind_variables, tablet_type,
                 keyspace=None,
                 shards=None,
                 keyspace_ids=None,
                 keyranges=None,
                 entity_keyspace_id_map=None, entity_column_name=None,
                 not_in_transaction=False):
        """Executes the given sql.

        FIXME(alainjobart): should take the session in.

        Args:
          sql: query to execute.
          bind_variables: map of bind variables for the query.
          tablet_type: the (string) version of the tablet type.
          keyspace: if specified, the keyspace to send the query to.
            Required if any of the routing parameters is used.
            Not required only if using vtgate v3 API.
          shards: if specified, use this list of shards names to route the
            query. Incompatible with keyspace_ids, keyranges,
            entity_keyspace_id_map, entity_column_name.
            Requires keyspace.
          keyspace_ids: if specified, use this list to route the query.
            Incompatible with shards, keyranges, entity_keyspace_id_map,
            entity_column_name.
            Requires keyspace.
          keyranges: if specified, use this list to route the query.
            Incompatible with shards, keyspace_ids, entity_keyspace_id_map,
            entity_column_name.
            Requires keyspace.
          entity_keyspace_id_map: if specified, use this map to route the
            query. Incompatible with shards, keyspace_ids, keyranges.
            Requires keyspace, entity_column_name.
          entity_column_name: if specified, use this value to route the
            query. Incompatible with shards, keyspace_ids, keyranges.
            Requires keyspace, entity_keyspace_id_map.
          not_in_transaction: force this execute to be outside the current
            transaction, if any.

        Returns:
          results: list of rows.
          rowcount: how many rows were affected.
          lastrowid: auto-increment value for the last row inserted.
          fields: describes the field names and types.

        Raises:
          dbexceptions.TimeoutError: for connection timeout.
          dbexceptions.RequestBacklog: the server is overloaded, and this
            query is asked to back off.
          dbexceptions.IntegrityError: integrity of an index would not be
            guaranteed with this statement.
          dbexceptions.DatabaseError: generic database error.
          dbexceptions.ProgrammingError: the supplied statements are
            invalid, this is probably an error in the code.
          dbexceptions.FatalError: this query should not be retried.
        """
        pass

    def _execute_batch(self, sql_list, bind_variables_list, tablet_type,
                       keyspace_list=None,
                       shards_list=None,
                       keyspace_ids_list=None,
                       as_transaction=False):
        """Executes a list of sql queries.

        These follow the same routing rules as _execute.
        FIXME(alainjobart): should take the session in.

        Args:
          sql_list: list of SQL queries to execute.
          bind_variables_list: bind variables to associated with each query.
          tablet_type: the (string) version of the tablet type.
          keyspace_list: if specified, the keyspaces to send the queries to.
            Required if any of the routing parameters is used.
            Not required only if using vtgate v3 API.
          shards_list: if specified, use this list of shards names (per sql
            query) to route each query.
            Incompatible with keyspace_ids_list.
            Requires keyspace_list.
          keyspace_ids_list: if specified, use this list of keyspace_ids
            (per sql query) to route each query.
            Incompatible with shards_list.
            Requires keyspace_list.
          as_transaction: starts and commits a transaction around the
            statements.

        Returns:
          results: an array of (results, rowcount, lastrowid, fields)
            tuples, one for each query.

        Raises:
          dbexceptions.TimeoutError: for connection timeout.
          dbexceptions.RequestBacklog: the server is overloaded, and this
            query is asked to back off.
          dbexceptions.IntegrityError: integrity of an index would not be
            guaranteed with this statement.
          dbexceptions.DatabaseError: generic database error.
          dbexceptions.ProgrammingError: the supplied statements are
            invalid, this is probably an error in the code.
          dbexceptions.FatalError: this query should not be retried.
        """
        pass

    def _stream_execute(self, sql, bind_variables, tablet_type,
                        keyspace=None,
                        shards=None,
                        keyspace_ids=None,
                        keyranges=None):
        """Executes the given sql, in streaming mode.

        FIXME(alainjobart): the return values are weird (historical reasons)
        and unused for now. We should use them, and not store the current
        streaming status in the connection, but in the cursor.

        Args:
          sql: query to execute.
          bind_variables: map of bind variables for the query.
          tablet_type: the (string) version of the tablet type.
          keyspace: if specified, the keyspace to send the query to.
            Required if any of the routing parameters is used.
            Not required only if using vtgate v3 API.
          shards: if specified, use this list of shards names to route the
            query. Incompatible with keyspace_ids, keyranges.
            Requires keyspace.
          keyspace_ids: if specified, use this list to route the query.
            Incompatible with shards, keyranges.
            Requires keyspace.
          keyranges: if specified, use this list to route the query.
            Incompatible with shards, keyspace_ids.
            Requires keyspace.

        Returns:
          None
          0
          0
          fields: the field definitions.

        Raises:
          dbexceptions.TimeoutError: for connection timeout.
          dbexceptions.RequestBacklog: the server is overloaded, and this
            query is asked to back off.
          dbexceptions.IntegrityError: integrity of an index would not be
            guaranteed with this statement.
          dbexceptions.DatabaseError: generic database error.
          dbexceptions.ProgrammingError: the supplied statements are
            invalid, this is probably an error in the code.
          dbexceptions.FatalError: this query should not be retried.
        """
        pass

    def _stream_next(self):
        """Returns the next result for a streaming query.

        Returns:
          row: a row of results, or None if done.

        Raises:
          dbexceptions.TimeoutError: for connection timeout.
          dbexceptions.RequestBacklog: the server is overloaded, and this
            query is asked to back off.
          dbexceptions.IntegrityError: integrity of an index would not be
            guaranteed with this statement.
          dbexceptions.DatabaseError: generic database error.
          dbexceptions.ProgrammingError: the supplied statements are
            invalid, this is probably an error in the code.
          dbexceptions.FatalError: this query should not be retried.
        """
        pass
|
{
"content_hash": "9cf023ad31d57ee20995f1174f123ca5",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 80,
"avg_line_length": 36.933933933933936,
"alnum_prop": 0.6868038051874136,
"repo_name": "netroby/vitess",
"id": "5293ef2bf4f0c131283f1f2254b1d3da5d868fd3",
"size": "12451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/vtdb/vtgate_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "40319"
},
{
"name": "CSS",
"bytes": "80183"
},
{
"name": "Go",
"bytes": "4385943"
},
{
"name": "HTML",
"bytes": "65751"
},
{
"name": "Java",
"bytes": "166956"
},
{
"name": "JavaScript",
"bytes": "58128"
},
{
"name": "Liquid",
"bytes": "15617"
},
{
"name": "Makefile",
"bytes": "7039"
},
{
"name": "PHP",
"bytes": "7167"
},
{
"name": "PLpgSQL",
"bytes": "8933"
},
{
"name": "Protocol Buffer",
"bytes": "58573"
},
{
"name": "Python",
"bytes": "938012"
},
{
"name": "Ruby",
"bytes": "465"
},
{
"name": "Shell",
"bytes": "51739"
},
{
"name": "Yacc",
"bytes": "18460"
}
],
"symlink_target": ""
}
|
"""gRPC TensorFlow operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from seed_rl.grpc.python.ops import *
|
{
"content_hash": "6e9bccf8a0d88e2cde8dd9f3078ed880",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.7351351351351352,
"repo_name": "google-research/seed_rl",
"id": "b10e97362997ae7e1cde136fa3502a87f7db888c",
"size": "779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grpc/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "41131"
},
{
"name": "Jupyter Notebook",
"bytes": "72883"
},
{
"name": "Python",
"bytes": "614110"
},
{
"name": "Shell",
"bytes": "31284"
},
{
"name": "Starlark",
"bytes": "932"
}
],
"symlink_target": ""
}
|
import logging
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.agents.ddpg.ddpg_tf_policy import DDPGTFPolicy
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
# Default DDPG configuration; merged over RLlib's common trainer config.
DEFAULT_CONFIG = with_common_config({
    # === Twin Delayed DDPG (TD3) and Soft Actor-Critic (SAC) tricks ===
    # TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
    # In addition to settings below, you can use "exploration_noise_type" and
    # "exploration_gauss_act_noise" to get IID Gaussian exploration noise
    # instead of OU exploration noise.
    # twin Q-net
    "twin_q": False,
    # delayed policy update
    "policy_delay": 1,
    # target policy smoothing
    # (this also replaces OU exploration noise with IID Gaussian exploration
    # noise, for now)
    "smooth_target_policy": False,
    # gaussian stddev of target action noise for smoothing
    "target_noise": 0.2,
    # target noise limit (bound)
    "target_noise_clip": 0.5,
    # === Evaluation ===
    # Evaluate with epsilon=0 every `evaluation_interval` training iterations.
    # The evaluation stats will be reported under the "evaluation" metric key.
    # Note that evaluation is currently not parallelized, and that for Ape-X
    # metrics are already only reported for the lowest epsilon workers.
    "evaluation_interval": None,
    # Number of episodes to run per evaluation period.
    "evaluation_num_episodes": 10,
    # === Model ===
    # Apply a state preprocessor with spec given by the "model" config option
    # (like other RL algorithms). This is mostly useful if you have a weird
    # observation shape, like an image. Disabled by default.
    "use_state_preprocessor": False,
    # Postprocess the policy network model output with these hidden layers. If
    # use_state_preprocessor is False, then these will be the *only* hidden
    # layers in the network.
    "actor_hiddens": [400, 300],
    # Hidden layers activation of the postprocessing stage of the policy
    # network
    "actor_hidden_activation": "relu",
    # Postprocess the critic network model output with these hidden layers;
    # again, if use_state_preprocessor is True, then the state will be
    # preprocessed by the model specified with the "model" config option first.
    "critic_hiddens": [400, 300],
    # Hidden layers activation of the postprocessing state of the critic.
    "critic_hidden_activation": "relu",
    # N-step Q learning
    "n_step": 1,
    # === Exploration ===
    "exploration_config": {
        # DDPG uses OrnsteinUhlenbeck (stateful) noise to be added to NN-output
        # actions (after a possible pure random phase of n timesteps).
        "type": "OrnsteinUhlenbeckNoise",
        # For how many timesteps should we return completely random actions,
        # before we start adding (scaled) noise?
        "random_timesteps": 1000,
        # The OU-base scaling factor to always apply to action-added noise.
        "ou_base_scale": 0.1,
        # The OU theta param.
        "ou_theta": 0.15,
        # The OU sigma param.
        "ou_sigma": 0.2,
        # The initial noise scaling factor.
        "initial_scale": 1.0,
        # The final noise scaling factor.
        "final_scale": 1.0,
        # Timesteps over which to anneal scale (from initial to final values).
        "scale_timesteps": 10000,
    },
    # Number of env steps to optimize for before returning
    "timesteps_per_iteration": 1000,
    # Extra configuration that disables exploration.
    "evaluation_config": {
        "explore": False
    },
    # === Replay buffer ===
    # Size of the replay buffer. Note that if async_updates is set, then
    # each worker will have a replay buffer of this size.
    "buffer_size": 50000,
    # If True prioritized replay buffer will be used.
    "prioritized_replay": True,
    # Alpha parameter for prioritized replay buffer.
    "prioritized_replay_alpha": 0.6,
    # Beta parameter for sampling from prioritized replay buffer.
    "prioritized_replay_beta": 0.4,
    # Time steps over which the beta parameter is annealed.
    "prioritized_replay_beta_annealing_timesteps": 20000,
    # Final value of beta
    "final_prioritized_replay_beta": 0.4,
    # Epsilon to add to the TD errors when updating priorities.
    "prioritized_replay_eps": 1e-6,
    # Whether to LZ4 compress observations
    "compress_observations": False,
    # If set, this will fix the ratio of replayed from a buffer and learned on
    # timesteps to sampled from an environment and stored in the replay buffer
    # timesteps. Otherwise, the replay will proceed at the native ratio
    # determined by (train_batch_size / rollout_fragment_length).
    "training_intensity": None,
    # === Optimization ===
    # Learning rate for the critic (Q-function) optimizer.
    "critic_lr": 1e-3,
    # Learning rate for the actor (policy) optimizer.
    "actor_lr": 1e-3,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 0,
    # Update the target by \tau * policy + (1-\tau) * target_policy
    "tau": 0.002,
    # If True, use huber loss instead of squared loss for critic network
    # Conventionally, no need to clip gradients if using a huber loss
    "use_huber": False,
    # Threshold of a huber loss
    "huber_threshold": 1.0,
    # Weights for L2 regularization
    "l2_reg": 1e-6,
    # If not None, clip gradients during optimization at this value
    "grad_clip": None,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1500,
    # Update the replay buffer with this many samples at once. Note that this
    # setting applies per-worker if num_workers > 1.
    "rollout_fragment_length": 1,
    # Size of a batched sampled from replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 256,
    # === Parallelism ===
    # Number of workers for collecting samples with. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 0,
    # Whether to compute priorities on workers.
    "worker_side_prioritization": False,
    # Prevent iterations from going lower than this time span
    "min_iter_time_s": 1,
})
# __sphinx_doc_end__
# yapf: enable
def validate_config(config):
    """Validate a DDPG config dict in place, normalizing where possible.

    Raises ValueError for a non-positive `grad_clip`; other problems are
    repaired with a logged warning.
    """
    # A custom model implies its output must be post-processed, so force
    # the state preprocessor on.
    if config["model"]["custom_model"]:
        logger.warning(
            "Setting use_state_preprocessor=True since a custom model "
            "was specified.")
        config["use_state_preprocessor"] = True
    grad_clip = config["grad_clip"]
    if grad_clip is not None and grad_clip <= 0.0:
        raise ValueError("`grad_clip` value must be > 0.0!")
    # ParameterNoise exploration only works on whole episodes.
    uses_param_noise = config["exploration_config"]["type"] == "ParameterNoise"
    if uses_param_noise and config["batch_mode"] != "complete_episodes":
        logger.warning(
            "ParameterNoise Exploration requires `batch_mode` to be "
            "'complete_episodes'. Setting batch_mode=complete_episodes.")
        config["batch_mode"] = "complete_episodes"
def get_policy_class(config):
    """Return the policy class matching the configured framework."""
    if config["framework"] == "torch":
        # Imported lazily so the torch dependency is only needed when used.
        from ray.rllib.agents.ddpg.ddpg_torch_policy import DDPGTorchPolicy
        return DDPGTorchPolicy
    return DDPGTFPolicy
# DDPG trainer: the generic off-policy training loop specialized with the
# DDPG policy, default config, and validation hook defined above.
DDPGTrainer = GenericOffPolicyTrainer.with_updates(
    name="DDPG",
    default_config=DEFAULT_CONFIG,
    default_policy=DDPGTFPolicy,
    get_policy_class=get_policy_class,
    validate_config=validate_config,
)
|
{
"content_hash": "001c45417e26b38c0b04e71a1b6e4341",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 79,
"avg_line_length": 41.30810810810811,
"alnum_prop": 0.6754776236587281,
"repo_name": "robertnishihara/ray",
"id": "9e580c0f8f868526865646f4c077ac5389e1ac5a",
"size": "7642",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rllib/agents/ddpg/ddpg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82909"
},
{
"name": "C++",
"bytes": "3971373"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Cython",
"bytes": "179979"
},
{
"name": "Dockerfile",
"bytes": "6468"
},
{
"name": "Go",
"bytes": "23139"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1248954"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "6567694"
},
{
"name": "Shell",
"bytes": "102477"
},
{
"name": "Starlark",
"bytes": "231513"
},
{
"name": "TypeScript",
"bytes": "147793"
}
],
"symlink_target": ""
}
|
from office365.runtime.client_value import ClientValue
class Thumbnail(ClientValue):
    """A thumbnail for an image, video, document, or any other item that
    has a bitmap representation."""

    def __init__(self, content=None, height=None, source_item_id=None, url=None, width=None):
        """
        :param str content: The content stream for the thumbnail.
        :param int height: The height of the thumbnail, in pixels.
        :param str source_item_id: The unique identifier of the item that provided the thumbnail. This is only
             available when a folder thumbnail is requested.
        :param str url: The URL used to fetch the thumbnail content.
        :param int width: The width of the thumbnail, in pixels.
        """
        self.url = url
        self.content = content
        # Pixel dimensions of the bitmap.
        self.width = width
        self.height = height
        # camelCase to match the service payload property name.
        self.sourceItemId = source_item_id
|
{
"content_hash": "70ff71a8bf5c91000098348cc143e0ac",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 110,
"avg_line_length": 41.43478260869565,
"alnum_prop": 0.6610703043022036,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "f83b5c46b0a3987749d37890dc8eeaeac2d2e749",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/onedrive/driveitems/thumbnail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
}
|
"""Platform for beewi_smartclim integration."""
from __future__ import annotations
from beewi_smartclim import BeewiSmartClimPoller # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
)
from homeassistant.const import CONF_MAC, CONF_NAME, PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
# Default values
DEFAULT_NAME = "BeeWi SmartClim"
# Sensor config: one [device class, display-name suffix, unit] triple per
# sensor entity exposed by the device.
SENSOR_TYPES = [
    [SensorDeviceClass.TEMPERATURE, "Temperature", TEMP_CELSIUS],
    [SensorDeviceClass.HUMIDITY, "Humidity", PERCENTAGE],
    [SensorDeviceClass.BATTERY, "Battery", PERCENTAGE],
]
# Platform schema: MAC address is required; the name is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_MAC): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the beewi_smartclim platform."""
    mac = config[CONF_MAC]
    prefix = config[CONF_NAME]
    # One poller is shared by all three sensor entities.
    poller = BeewiSmartClimPoller(mac)
    entities = []
    for device, suffix, unit in SENSOR_TYPES:
        # `prefix` is the name configured by the user for the sensor; the
        # device type is appended (garden -> garden temperature).
        entity_name = f"{prefix} {suffix}" if prefix else suffix
        entities.append(BeewiSmartclimSensor(poller, entity_name, mac, device, unit))
    add_entities(entities)
class BeewiSmartclimSensor(SensorEntity):
    """Representation of a Sensor."""

    def __init__(self, poller, name, mac, device, unit):
        """Initialize the sensor."""
        self._poller = poller
        self._device = device
        self._attr_name = name
        self._attr_device_class = device
        self._attr_native_unit_of_measurement = unit
        self._attr_unique_id = f"{mac}_{device}"

    def update(self) -> None:
        """Fetch new state data from the poller."""
        self._poller.update_sensor()
        # Dispatch to the poller getter matching this entity's device class.
        getters = {
            SensorDeviceClass.TEMPERATURE: self._poller.get_temperature,
            SensorDeviceClass.HUMIDITY: self._poller.get_humidity,
            SensorDeviceClass.BATTERY: self._poller.get_battery,
        }
        getter = getters.get(self._device)
        self._attr_native_value = getter() if getter is not None else None
|
{
"content_hash": "487b65123d7cf83c95da658ef5f0ed96",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 85,
"avg_line_length": 32.83529411764706,
"alnum_prop": 0.6782515227517019,
"repo_name": "w1ll1am23/home-assistant",
"id": "4d8936859f55577d429c5f9b7c5abb2272dc5ed0",
"size": "2791",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/beewi_smartclim/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
'''
Filter reads from one or more BAMs and output a CSV or a new BAM.
Loci and VCF files may be specified, in which case reads are filtered to
overlap the specified loci or variants.
Examples:
Print basic fields for the reads in a BAM:
%(prog)s test/data/CELSR1/bams/bam_0.bam
Same as above but filter only to reads aligned on the (-) strand, write to a
file instead of stdout, and also include the mapping quality and sequenced
bases in the output:
%(prog)s test/data/CELSR1/bams/bam_0.bam \\
--is-reverse \\
--field mapping_quality query_alignment_sequence \\
--out /tmp/result.csv
Write a bam file consisting of reads with mapping quality >=30 and
overlapping a certain locus:
%(prog)s test/data/CELSR1/bams/bam_0.bam \\
--min-mapping-quality 30 \\
--locus 22:46932040-46932050 \\
--out /tmp/result.bam
Write a bam file consisting of reads overlapping variants from a VCF:
%(prog)s test/data/CELSR1/bams/bam_0.bam \\
--variants test/data/CELSR1/vcfs/vcf_1.vcf \\
--out /tmp/result.bam
Print just the header for a BAM in csv format:
%(prog)s test/data/CELSR1/bams/bam_0.bam --header
'''
from __future__ import absolute_import
import argparse
import sys
import csv
import pysam
from . import configure_logging
from .. import loci_util
from .. import reads_util
from .. import variants_util
from ..read_evidence.pileup_collection import PileupCollection, to_locus
# Read fields always included in csv output unless --no-standard-fields.
STANDARD_FIELDS = [
    "source",
    "query_name",
    "reference_start",
    "reference_end",
    "cigarstring",
]
# Command-line interface; the module docstring doubles as the usage text.
parser = argparse.ArgumentParser(usage=__doc__)
group = parser.add_argument_group("output")
group.add_argument("--out",
    help="Output file. Format is guessed from file extension: must be csv or "
    "bam. If not specified, csv is written to stdout.")
group.add_argument("--field", nargs="+", default=[],
    help="Additional read fields to output as columns in the csv. See pysam "
    "documentation (http://pysam.readthedocs.org/en/latest/api.html) for the "
    "meaning of these fields. Valid fields include: %s" % (
        " ".join(PileupCollection._READ_ATTRIBUTE_NAMES)))
group.add_argument("--no-standard-fields", action="store_true", default=False,
    help="Do not include the standard fields (%s) in csv output."
    % ', '.join(STANDARD_FIELDS))
group.add_argument("--no-sort", action="store_true", default=False,
    help="When outputting a bam, do not call samtools sort.")
group.add_argument(
    "--header",
    action="store_true",
    default=False,
    help="Output BAM/SAM header only.")
group.add_argument(
    "--header-set",
    nargs=4,
    action="append",
    help="When outputting a bam, set a particular header field to the given "
    "value. Example: --header-set RG . SM my_sample")
group.add_argument("-v", "--verbose", action="store_true", default=False)
# Input/filter arguments are contributed by the shared helper modules.
reads_util.add_args(parser, positional=True)
loci_util.add_args(parser.add_argument_group("loci specification"))
variants_util.add_args(parser)
def run(raw_args=sys.argv[1:]):
    """Entry point: parse arguments, filter reads, and write csv/bam output.

    NOTE(review): the default `raw_args=sys.argv[1:]` is evaluated once at
    import time (standard mutable/early-binding default caveat) — presumably
    intentional for a CLI entry point, but confirm.
    """
    args = parser.parse_args(raw_args)
    configure_logging(args)
    read_sources = reads_util.load_from_args(args)
    if not read_sources:
        parser.error("No read sources specified.")
    loci = loci_util.load_from_args(args)  # may be None
    variants_df = variants_util.load_from_args_as_dataframe(args)
    if variants_df is not None:
        # Reads must overlap either the explicit loci or the variant loci.
        variant_loci = loci_util.Loci(
            to_locus(variant)
            for variant in variants_df["variant"])
        loci = variant_loci if loci is None else loci.union(variant_loci)
    if args.header:
        # --header is incompatible with any read filtering/selection options.
        if loci is not None:
            parser.error("If specifying --header don't specify loci.")
        if args.field:
            parser.error("If specifying --header don't specify fields.")
    out_pysam_handle = None
    out_csv_writer = out_csv_fd = None
    # Choose the output sink from the file extension (bam/sam vs csv/stdout).
    if args.out and (args.out.endswith(".bam") or args.out.endswith(".sam")):
        if args.field:
            parser.error("Don't specify fields when outputting to bam or sam.")
        # The output header is taken from the first read source, with any
        # --header-set edits applied.
        header = update_header(args, read_sources[0].handle.header)
        out_pysam_handle = pysam.AlignmentFile(
            args.out,
            "wb",
            header=header)
    elif not args.out or args.out.endswith(".csv"):
        out_csv_fd = open(args.out, "w") if args.out else sys.stdout
        out_csv_writer = csv.writer(out_csv_fd)
        if args.header:
            if args.field:
                parser.error("Don't specify fields when outputting header.")
            out_csv_writer.writerow([
                "read_source", "group", "index", "key", "value",
            ])
        else:
            # Column order: standard fields (unless suppressed) then --field.
            columns = (
                ([] if args.no_standard_fields else STANDARD_FIELDS) +
                args.field)
            out_csv_writer.writerow(columns)
    else:
        parser.error(
            "Don't know how to write to file with output extension: %s. "
            "Supported extensions: csv, bam, sam." % args.out)
    num_reads = 0
    for read_source in read_sources:
        if args.header:
            # Header-only mode: dump each source's (possibly edited) header.
            header = update_header(args, read_source.handle.header)
            for (group, i, key, value) in reads_util.flatten_header(header):
                out_csv_writer.writerow(
                    [read_source.name, group, str(i), key, value])
            continue  # we don't look at reads at all.
        for read in read_source.reads(loci):
            num_reads += 1
            if out_pysam_handle is not None:
                out_pysam_handle.write(read)
            if out_csv_writer is not None:
                out_csv_writer.writerow([
                    str(read_field(read_source, read, field))
                    for field in columns
                ])
    if out_pysam_handle is not None:
        out_pysam_handle.close()
        if not args.no_sort:
            # Sort in place: output path is both "-o" target and input.
            print("Sorting read file %s" % args.out)
            pysam.sort(
                "-o", args.out,
                "-T", "varlens_reads", args.out,
                catch_stdout=False)
        print("Wrote %d reads: %s" % (num_reads, args.out))
    if out_csv_fd is not None and out_csv_fd is not sys.stdout:
        out_csv_fd.close()
        print("Wrote: %s" % args.out)
def read_field(read_source, read, field_name):
    """Resolve *field_name* to a value for one read.

    "source" maps to the read source's name; "tag:<NAME>" looks up a BAM tag
    (None if absent); anything else is read as an attribute of the pysam read.
    Raises ValueError for an unknown attribute.
    """
    if field_name == "source":
        return read_source.name
    tag_prefix = "tag:"
    if field_name.startswith(tag_prefix):
        return read.get_tags().get(field_name[len(tag_prefix):])
    try:
        return getattr(read, field_name)
    except AttributeError:
        raise ValueError("Invalid read field '%s'. Valid fields include: %s"
                         % (field_name, ' '.join(dir(read))))
def update_header(args, header):
    """Return *header* with any --header-set edits applied.

    Each --header-set entry is (group, index_string, key, value); an
    index_string of "." targets every entry in the group, otherwise it is a
    comma-separated list of integer indices. A non-list group value is
    wrapped in a single-element list, matching the shape pysam expects.

    Fix: the previous implementation only made a shallow ``dict(header)``
    copy, so assignments into the nested group lists/dicts mutated the
    caller's header (typically the live header of an open pysam file).
    The group lists and entry dicts being modified are now copied first;
    the input header is never mutated.
    """
    if not args.header_set:
        return header
    header = dict(header)
    for (group, index_string, key, value) in args.header_set:
        entries = header[group]
        if not isinstance(entries, list):
            entries = [entries]
        # Copy each dict entry before mutating; leave non-dict entries
        # as-is so an invalid target still fails the same way as before.
        entries = [
            dict(entry) if isinstance(entry, dict) else entry
            for entry in entries
        ]
        if index_string == ".":
            indices = range(len(entries))
        else:
            indices = [int(x) for x in index_string.split(",")]
        for index in indices:
            entries[index][key] = value
        header[group] = entries
    return header
|
{
"content_hash": "260c68b3e210bee69acb810704035285",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 34.45238095238095,
"alnum_prop": 0.6138217000691085,
"repo_name": "hammerlab/varlens",
"id": "45e60fdb67e63d49f6031abcacbbc9e7d5686e5a",
"size": "7235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "varlens/commands/reads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "144393"
},
{
"name": "Shell",
"bytes": "456"
}
],
"symlink_target": ""
}
|
'''
Convert an Oracle SQL script (which may contain block comments) into a SAP
BODS script: each SQL statement is wrapped in a
sql('NATLDWH_UTLMGT_DASHBOARD', '...') call, and "drop table" / "drop index"
statements are rewritten as
    begin execute immediate 'drop table ...'; exception when others then null; end;
so that a missing object does not abort the run.  Output is printed to stdout.
'''
def convertToBODSScript ( path):
    """Read the SQL file at *path*, convert it, and print the BODS script."""
    # Read all non-blank lines into memory first.
    f0 = open(path,'r')
    f1 = []
    for line in f0:
        if line.strip():
            f1.append(line)
    f0.close()
    # isBegin: the next emitted line starts a new sql('...') statement.
    # isCommentBegin: currently inside a /* ... */ block comment.
    isBegin = True
    isCommentBegin = False
    for line in f1:
        if isCommentBegin == False:
            # Opening of a block comment — unless it carries a parallel
            # hint, which must be preserved as live SQL.
            if line.strip(' \t\n\r').startswith('/*') and 'parallel' not in line.lower():
                isCommentBegin = True
                line = '# ' + line
                # Single-line /* ... */ comment closes immediately.
                if line.strip(' \t\n\r').endswith('*/') and 'parallel' not in line.lower():
                    isCommentBegin = False
                print (line)
                continue
            if line.lstrip(' \t\n\r').startswith('--'):
                # NOTE(review): full-line "--" comments are converted to "#"
                # but never printed on this branch — confirm that dropping
                # them from the output is intended.
                line = '# ' + line
            else:
                if line.lstrip(' \t\n\r').find('--') > 0:  # this handle in line comment
                    line = line[0 :line.find('--')] + '\n'
                # Escape single quotes for embedding inside the sql('...') literal.
                line = line.replace("'", "\\'")
                if isBegin==True:
                    line ="sql('NATLDWH_UTLMGT_DASHBOARD','" + line
                    isBegin =False
                else:
                    # Continuation of the current statement: concatenate.
                    line ="|| '" + line
                    isBegin =False
                if line.rstrip(' \t\n\r').endswith(';'):
                    # End of the SQL statement: close the sql('...') call.
                    line =line.replace(';', " ');\n")
                    isBegin =True
                else:
                    line =line.rstrip(' \t\n\r') + " '\n"
                # Wrap DROP TABLE / DROP INDEX so a missing object is ignored.
                if line.lower().find("'drop table ") >= 0:
                    line = line[0:line.lower().index("\'drop table ")] + "\'begin execute immediate \\" + line[line.lower().index("\'drop table "):line.index("\');")] + "\\\'; exception when others then null; end;\');"
                if line.lower().find("'drop index ") >= 0:
                    line = line[0:line.lower().index("\'drop index ")] + "\'begin execute immediate \\" + line[line.lower().index("\'drop index "):line.index("\');")] + "\\\'; exception when others then null; end;\');"
                print (line)
        else:
            # Inside a block comment: comment the line out and watch for
            # the closing */.
            line = '# ' + line
            print(line)
            if line.strip(' \t\n\r').endswith('*/') and 'parallel' not in line.lower() :
                isCommentBegin = False
#f1.close()
#convertToBODSScript (r'C:\Users\Wenlei\Desktop\sample.sql')
#convertToBODSScript (r'C:\Users\Wenlei\Desktop\sample2.sql')
# Runs at import time against a hard-coded local path.
convertToBODSScript ( r'C:\Users\Wenlei\Desktop\sample15.sql')
|
{
"content_hash": "92ca98c88aaef0eaad4db79a80052ed7",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 219,
"avg_line_length": 43.145161290322584,
"alnum_prop": 0.47514018691588783,
"repo_name": "wenleicao/wenleicao.github.io",
"id": "97167ddd82dc52707cc344add3e0a41f193917f5",
"size": "2675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Files/read_sql_fn4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "364"
},
{
"name": "HTML",
"bytes": "7889"
},
{
"name": "JavaScript",
"bytes": "1912"
},
{
"name": "Jupyter Notebook",
"bytes": "599055"
},
{
"name": "PowerShell",
"bytes": "1923"
},
{
"name": "Python",
"bytes": "17550"
},
{
"name": "SCSS",
"bytes": "63525"
},
{
"name": "Scala",
"bytes": "2003"
},
{
"name": "TSQL",
"bytes": "6527"
}
],
"symlink_target": ""
}
|
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and react to changes.
"""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable, Collection, Coroutine, Iterable, Mapping
import datetime
import enum
import functools
import logging
import os
import pathlib
import re
import threading
from time import monotonic
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, cast
import attr
import voluptuous as vol
import yarl
from homeassistant import block_async_io, loader, util
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_FRIENDLY_NAME,
ATTR_NOW,
ATTR_SECONDS,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_UNIT_SYSTEM_IMPERIAL,
EVENT_CALL_SERVICE,
EVENT_CORE_CONFIG_UPDATE,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_HOMEASSISTANT_FINAL_WRITE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
LENGTH_METERS,
MATCH_ALL,
MAX_LENGTH_EVENT_EVENT_TYPE,
MAX_LENGTH_STATE_STATE,
__version__,
)
from homeassistant.exceptions import (
HomeAssistantError,
InvalidEntityFormatError,
InvalidStateError,
MaxLengthExceeded,
ServiceNotFound,
Unauthorized,
)
from homeassistant.util import location
from homeassistant.util.async_ import (
fire_coroutine_threadsafe,
run_callback_threadsafe,
shutdown_run_callback_threadsafe,
)
import homeassistant.util.dt as dt_util
from homeassistant.util.timeout import TimeoutManager
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM, UnitSystem
import homeassistant.util.uuid as uuid_util
# Typing imports that create a circular dependency
if TYPE_CHECKING:
from homeassistant.auth import AuthManager
from homeassistant.components.http import HomeAssistantHTTP
from homeassistant.config_entries import ConfigEntries
# Shutdown stage timeouts (presumably seconds — confirm against the
# shutdown sequence that consumes them; it is outside this chunk).
STAGE_1_SHUTDOWN_TIMEOUT = 100
STAGE_2_SHUTDOWN_TIMEOUT = 60
STAGE_3_SHUTDOWN_TIMEOUT = 30
# Patch blocking I/O calls so they cannot run inside the event loop.
block_async_io.enable()
T = TypeVar("T")
_UNDEF: dict = {}  # Internal; not helpers.typing.UNDEFINED due to circular dependency
# pylint: disable=invalid-name
CALLABLE_T = TypeVar("CALLABLE_T", bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
CORE_STORAGE_KEY = "core.config"
CORE_STORAGE_VERSION = 1
DOMAIN = "homeassistant"
# How long to wait to log tasks that are blocking
BLOCK_LOG_TIMEOUT = 60
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10  # seconds
# Source of core configuration
SOURCE_DISCOVERED = "discovered"
SOURCE_STORAGE = "storage"
SOURCE_YAML = "yaml"
# How long to wait until things that run on startup have to finish.
TIMEOUT_EVENT_START = 15
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> list[str]:
    """Split a state entity ID into domain and object ID.

    Splits on the first "." only; an ID without a dot yields a
    single-element list.
    """
    return entity_id.split(".", maxsplit=1)
# Lowercase slug "." lowercase slug; no leading/trailing/double underscores.
VALID_ENTITY_ID = re.compile(r"^(?!.+__)(?!_)[\da-z_]+(?<!_)\.(?!_)[\da-z_]+(?<!_)$")

def valid_entity_id(entity_id: str) -> bool:
    """Test if an entity ID is a valid format.

    Format: <domain>.<entity> where both are slugs.
    """
    return bool(VALID_ENTITY_ID.match(entity_id))
def valid_state(state: str) -> bool:
    """Return True if *state* fits within the maximum allowed state length."""
    return MAX_LENGTH_STATE_STATE >= len(state)
def callback(func: CALLABLE_T) -> CALLABLE_T:
    """Annotation to mark method as safe to call from within the event loop."""
    # Tag the callable so is_callback() can recognize it later.
    func._hass_callback = True  # type: ignore[attr-defined]
    return func
def is_callback(func: Callable[..., Any]) -> bool:
    """Check if function is safe to be called in the event loop."""
    marker = getattr(func, "_hass_callback", False)
    # Identity check: only the literal True set by @callback counts.
    return marker is True
@enum.unique
class HassJobType(enum.Enum):
    """Represent a job type.

    Determines how HassJob.target is invoked by the scheduler:
    Coroutinefunction -> loop.create_task, Callback -> loop.call_soon,
    Executor -> loop.run_in_executor.
    """
    Coroutinefunction = 1
    Callback = 2
    Executor = 3
class HassJob:
    """Pair a callable with its pre-computed job type.

    We check the callable type in advance so we can avoid checking it
    every time we run the job.
    """
    __slots__ = ("job_type", "target")

    def __init__(self, target: Callable) -> None:
        """Classify *target* and store it; reject bare coroutine objects."""
        if asyncio.iscoroutine(target):
            raise ValueError("Coroutine not allowed to be passed to HassJob")
        self.job_type = _get_callable_job_type(target)
        self.target = target

    def __repr__(self) -> str:
        """Return the job."""
        return f"<Job {self.job_type} {self.target}>"
def _get_callable_job_type(target: Callable) -> HassJobType:
    """Determine the job type from the callable."""
    # functools.partial may wrap a coroutine function; unwrap every layer
    # before inspecting the underlying callable.
    unwrapped = target
    while isinstance(unwrapped, functools.partial):
        unwrapped = unwrapped.func
    if asyncio.iscoroutinefunction(unwrapped):
        return HassJobType.Coroutinefunction
    if is_callback(unwrapped):
        return HassJobType.Callback
    return HassJobType.Executor
class CoreState(enum.Enum):
    """Represent the current state of Home Assistant."""
    not_running = "NOT_RUNNING"
    starting = "STARTING"
    running = "RUNNING"
    stopping = "STOPPING"
    final_write = "FINAL_WRITE"
    stopped = "STOPPED"

    def __str__(self) -> str:  # pylint: disable=invalid-str-returned
        """Return the event."""
        # str(state) yields the uppercase value, e.g. "RUNNING".
        return self.value
class HomeAssistant:
"""Root object of the Home Assistant home automation."""
auth: AuthManager
http: HomeAssistantHTTP = None # type: ignore
config_entries: ConfigEntries = None # type: ignore
def __init__(self) -> None:
    """Initialize new Home Assistant object.

    Must be called from within a running event loop
    (asyncio.get_running_loop raises otherwise).
    """
    self.loop = asyncio.get_running_loop()
    # Tasks scheduled while _track_task is True, awaited by
    # async_block_till_done().
    self._pending_tasks: list = []
    self._track_task = True
    self.bus = EventBus(self)
    self.services = ServiceRegistry(self)
    self.states = StateMachine(self.bus, self.loop)
    self.config = Config(self)
    self.components = loader.Components(self)
    self.helpers = loader.Helpers(self)
    # This is a dictionary that any component can store any data on.
    self.data: dict = {}
    self.state: CoreState = CoreState.not_running
    self.exit_code: int = 0
    # If not None, use to signal end-of-loop
    self._stopped: asyncio.Event | None = None
    # Timeout handler for Core/Helper namespace
    self.timeout: TimeoutManager = TimeoutManager()
@property
def is_running(self) -> bool:
    """Return if Home Assistant is running."""
    active_states = (CoreState.starting, CoreState.running)
    return self.state in active_states
@property
def is_stopping(self) -> bool:
    """Return if Home Assistant is stopping."""
    shutdown_states = (CoreState.stopping, CoreState.final_write)
    return self.state in shutdown_states
def start(self) -> int:
    """Start Home Assistant and block until the loop stops.

    Note: This function is only used for testing.
    For regular use, use "await hass.run()".

    Returns the exit code set during shutdown.
    """
    # Register the async start
    fire_coroutine_threadsafe(self.async_start(), self.loop)
    # Run forever
    # Block until stopped
    _LOGGER.info("Starting Home Assistant core loop")
    self.loop.run_forever()
    return self.exit_code
async def async_run(self, *, attach_signals: bool = True) -> int:
    """Home Assistant main entry point.

    Start Home Assistant and block until stopped.

    This method is a coroutine.

    attach_signals: register OS signal handlers for graceful shutdown.
    Raises RuntimeError if the core is already running.
    """
    if self.state != CoreState.not_running:
        raise RuntimeError("Home Assistant is already running")
    # _async_stop will set this instead of stopping the loop
    self._stopped = asyncio.Event()
    await self.async_start()
    if attach_signals:
        # pylint: disable=import-outside-toplevel
        from homeassistant.helpers.signal import async_register_signal_handling
        async_register_signal_handling(self)
    # Block here until _async_stop signals the event.
    await self._stopped.wait()
    return self.exit_code
async def async_start(self) -> None:
    """Finalize startup from inside the event loop.

    Fires the start events, waits (bounded by TIMEOUT_EVENT_START) for
    startup tasks, then transitions the core to the running state.

    This method is a coroutine.
    """
    _LOGGER.info("Starting Home Assistant")
    setattr(self.loop, "_thread_ident", threading.get_ident())
    self.state = CoreState.starting
    self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
    self.bus.async_fire(EVENT_HOMEASSISTANT_START)
    try:
        # Only block for EVENT_HOMEASSISTANT_START listener
        self.async_stop_track_tasks()
        async with self.timeout.async_timeout(TIMEOUT_EVENT_START):
            await self.async_block_till_done()
    except asyncio.TimeoutError:
        # Startup is not aborted on timeout; we log and continue.
        _LOGGER.warning(
            "Something is blocking Home Assistant from wrapping up the "
            "start up phase. We're going to continue anyway. Please "
            "report the following info at https://github.com/home-assistant/core/issues: %s",
            ", ".join(self.config.components),
        )
    # Allow automations to set up the start triggers before changing state
    await asyncio.sleep(0)
    if self.state != CoreState.starting:
        # A stop may have been requested while we yielded above.
        _LOGGER.warning(
            "Home Assistant startup has been interrupted. "
            "Its state may be inconsistent"
        )
        return
    self.state = CoreState.running
    self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
    self.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
    _async_create_timer(self)
def add_job(self, target: Callable[..., Any], *args: Any) -> None:
    """Schedule a job from any thread.

    target: target to call.
    args: parameters for method to call.

    Raises ValueError when target is None.
    """
    if target is None:
        raise ValueError("Don't call add_job with None")
    # Hop onto the event loop thread, then delegate to async_add_job.
    self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def async_add_job(
    self, target: Callable[..., Any], *args: Any
) -> asyncio.Future | None:
    """Add a job from within the event loop.

    This method must be run in the event loop.
    target: target to call.
    args: parameters for method to call.

    Returns the created task/future, or None when the target is a
    callback (scheduled via call_soon, no awaitable produced).
    """
    if target is None:
        raise ValueError("Don't call async_add_job with None")
    # A bare coroutine object is scheduled directly as a task.
    if asyncio.iscoroutine(target):
        return self.async_create_task(cast(Coroutine, target))
    return self.async_add_hass_job(HassJob(target), *args)
@callback
def async_add_hass_job(self, hassjob: HassJob, *args: Any) -> asyncio.Future | None:
    """Add a HassJob from within the event loop.

    This method must be run in the event loop.
    hassjob: HassJob to call.
    args: parameters for method to call.

    Dispatches on the pre-computed job type: coroutine functions become
    tasks, callbacks run via call_soon (returning None), everything else
    runs in the default executor.
    """
    if hassjob.job_type == HassJobType.Coroutinefunction:
        task = self.loop.create_task(hassjob.target(*args))
    elif hassjob.job_type == HassJobType.Callback:
        self.loop.call_soon(hassjob.target, *args)
        return None
    else:
        task = self.loop.run_in_executor(  # type: ignore
            None, hassjob.target, *args
        )
    # If a task is scheduled
    if self._track_task:
        self._pending_tasks.append(task)
    return task
def create_task(self, target: Awaitable) -> None:
    """Schedule *target* as a task from any thread.

    target: target to call.
    """
    # Thread-safe hand-off to async_create_task on the loop thread.
    self.loop.call_soon_threadsafe(self.async_create_task, target)
@callback
def async_create_task(self, target: Awaitable) -> asyncio.tasks.Task:
    """Create a task from within the eventloop.

    This method must be run in the event loop.
    target: target to call.

    Returns the created task; it is also recorded in _pending_tasks when
    task tracking is enabled.
    """
    task: asyncio.tasks.Task = self.loop.create_task(target)
    if self._track_task:
        self._pending_tasks.append(task)
    return task
@callback
def async_add_executor_job(
self, target: Callable[..., T], *args: Any
) -> Awaitable[T]:
"""Add an executor job from within the event loop."""
task = self.loop.run_in_executor(None, target, *args)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_track_tasks(self) -> None:
"""Track tasks so you can wait for all tasks to be done."""
self._track_task = True
@callback
def async_stop_track_tasks(self) -> None:
"""Stop track tasks so you can't wait for all tasks to be done."""
self._track_task = False
@callback
def async_run_hass_job(self, hassjob: HassJob, *args: Any) -> asyncio.Future | None:
"""Run a HassJob from within the event loop.
This method must be run in the event loop.
hassjob: HassJob
args: parameters for method to call.
"""
if hassjob.job_type == HassJobType.Callback:
hassjob.target(*args)
return None
return self.async_add_hass_job(hassjob, *args)
@callback
def async_run_job(
self, target: Callable[..., None | Awaitable], *args: Any
) -> asyncio.Future | None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if asyncio.iscoroutine(target):
return self.async_create_task(cast(Coroutine, target))
return self.async_run_hass_job(HassJob(target), *args)
def block_till_done(self) -> None:
"""Block until all pending work is done."""
asyncio.run_coroutine_threadsafe(
self.async_block_till_done(), self.loop
).result()
    async def async_block_till_done(self) -> None:
        """Block until all pending work is done.

        Repeatedly drains self._pending_tasks until no tasks remain,
        logging tasks that keep the loop waiting for too long.
        """
        # To flush out any call_soon_threadsafe
        await asyncio.sleep(0)
        # None -> 0 -> monotonic() acts as a three-stage counter: only
        # after waiting twice do we pay for monotonic(), and only after
        # BLOCK_LOG_TIMEOUT beyond that do we log stragglers.
        start_time: float | None = None
        while self._pending_tasks:
            pending = [task for task in self._pending_tasks if not task.done()]
            self._pending_tasks.clear()
            if pending:
                await self._await_and_log_pending(pending)
                if start_time is None:
                    # Avoid calling monotonic() until we know
                    # we may need to start logging blocked tasks.
                    start_time = 0
                elif start_time == 0:
                    # If we have waited twice then we set the start
                    # time
                    start_time = monotonic()
                elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
                    # We have waited at least three loops and new tasks
                    # continue to block. At this point we start
                    # logging all waiting tasks.
                    for task in pending:
                        _LOGGER.debug("Waiting for task: %s", task)
            else:
                # All tracked tasks are done; yield once so any follow-up
                # work they scheduled can land in _pending_tasks.
                await asyncio.sleep(0)
async def _await_and_log_pending(self, pending: Iterable[Awaitable[Any]]) -> None:
"""Await and log tasks that take a long time."""
wait_time = 0
while pending:
_, pending = await asyncio.wait(pending, timeout=BLOCK_LOG_TIMEOUT)
if not pending:
return
wait_time += BLOCK_LOG_TIMEOUT
for task in pending:
_LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)
def stop(self) -> None:
"""Stop Home Assistant and shuts down all threads."""
if self.state == CoreState.not_running: # just ignore
return
fire_coroutine_threadsafe(self.async_stop(), self.loop)
    async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
        """Stop Home Assistant and shuts down all threads.

        The "force" flag commands async_stop to proceed regardless of
        Home Assistant's current state. You should not set this flag
        unless you're testing.

        This method is a coroutine.
        """
        if not force:
            # Some tests require async_stop to run,
            # regardless of the state of the loop.
            if self.state == CoreState.not_running:  # just ignore
                return
            if self.state in [CoreState.stopping, CoreState.final_write]:
                _LOGGER.info("Additional call to async_stop was ignored")
                return
            if self.state == CoreState.starting:
                # This may not work
                _LOGGER.warning(
                    "Stopping Home Assistant before startup has completed may fail"
                )
        # stage 1: announce the stop and drain pending tasks.
        self.state = CoreState.stopping
        self.async_track_tasks()
        self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        try:
            async with self.timeout.async_timeout(STAGE_1_SHUTDOWN_TIMEOUT):
                await self.async_block_till_done()
        except asyncio.TimeoutError:
            _LOGGER.warning(
                "Timed out waiting for shutdown stage 1 to complete, the shutdown will continue"
            )
        # stage 2: listeners get a final chance to write data.
        self.state = CoreState.final_write
        self.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
        try:
            async with self.timeout.async_timeout(STAGE_2_SHUTDOWN_TIMEOUT):
                await self.async_block_till_done()
        except asyncio.TimeoutError:
            _LOGGER.warning(
                "Timed out waiting for shutdown stage 2 to complete, the shutdown will continue"
            )
        # stage 3: close down remaining listeners.
        self.state = CoreState.not_running
        self.bus.async_fire(EVENT_HOMEASSISTANT_CLOSE)
        # Prevent run_callback_threadsafe from scheduling any additional
        # callbacks in the event loop as callbacks created on the futures
        # it returns will never run after the final `self.async_block_till_done`
        # which will cause the futures to block forever when waiting for
        # the `result()` which will cause a deadlock when shutting down the executor.
        shutdown_run_callback_threadsafe(self.loop)
        try:
            async with self.timeout.async_timeout(STAGE_3_SHUTDOWN_TIMEOUT):
                await self.async_block_till_done()
        except asyncio.TimeoutError:
            _LOGGER.warning(
                "Timed out waiting for shutdown stage 3 to complete, the shutdown will continue"
            )
        self.exit_code = exit_code
        self.state = CoreState.stopped
        # Signal any waiter on self._stopped that shutdown has completed.
        if self._stopped is not None:
            self._stopped.set()
@attr.s(slots=True, frozen=True)
class Context:
    """The context that triggered something."""

    # ID of the user who triggered the action; None when system-initiated.
    user_id: str | None = attr.ib(default=None)
    # ID of the parent context when this one was spawned by another action.
    parent_id: str | None = attr.ib(default=None)
    # Unique identifier for this context instance.
    id: str = attr.ib(factory=uuid_util.random_uuid_hex)

    def as_dict(self) -> dict[str, str | None]:
        """Return a dictionary representation of the context."""
        return {"id": self.id, "parent_id": self.parent_id, "user_id": self.user_id}
class EventOrigin(enum.Enum):
    """Origin of an event: fired locally or received from a remote source."""

    local = "LOCAL"
    remote = "REMOTE"

    def __str__(self) -> str:  # pylint: disable=invalid-str-returned
        """Return the underlying string value."""
        return self.value
class Event:
    """Representation of an event within the bus."""

    __slots__ = ["event_type", "data", "origin", "time_fired", "context"]

    def __init__(
        self,
        event_type: str,
        data: dict[str, Any] | None = None,
        origin: EventOrigin = EventOrigin.local,
        time_fired: datetime.datetime | None = None,
        context: Context | None = None,
    ) -> None:
        """Initialize a new event.

        event_type: name of the event on the bus.
        data: optional payload; defaults to an empty dict.
        origin: whether the event was fired locally or remotely.
        time_fired: defaults to the current UTC time.
        context: defaults to a fresh Context.
        """
        self.event_type = event_type
        self.data = data or {}
        self.origin = origin
        self.time_fired = time_fired or dt_util.utcnow()
        self.context: Context = context or Context()

    def __hash__(self) -> int:
        """Make hashable."""
        # The only event type that shares context are the TIME_CHANGED
        return hash((self.event_type, self.context.id, self.time_fired))

    def as_dict(self) -> dict[str, Any]:
        """Create a dict representation of this Event.

        Async friendly.
        """
        return {
            "event_type": self.event_type,
            "data": dict(self.data),
            "origin": str(self.origin.value),
            "time_fired": self.time_fired.isoformat(),
            "context": self.context.as_dict(),
        }

    def __repr__(self) -> str:
        """Return the representation."""
        if self.data:
            return f"<Event {self.event_type}[{str(self.origin)[0]}]: {util.repr_helper(self.data)}>"
        return f"<Event {self.event_type}[{str(self.origin)[0]}]>"

    def __eq__(self, other: Any) -> bool:
        """Return the comparison."""
        # The class check short-circuits before any attribute access on
        # a foreign object.
        return (  # type: ignore
            self.__class__ == other.__class__
            and self.event_type == other.event_type
            and self.data == other.data
            and self.origin == other.origin
            and self.time_fired == other.time_fired
            and self.context == other.context
        )
class EventBus:
    """Allow the firing of and listening for events."""

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize a new event bus."""
        # event_type -> list of (job, optional filter) pairs.
        self._listeners: dict[str, list[tuple[HassJob, Callable | None]]] = {}
        self._hass = hass

    @callback
    def async_listeners(self) -> dict[str, int]:
        """Return dictionary with events and the number of listeners.

        This method must be run in the event loop.
        """
        return {key: len(self._listeners[key]) for key in self._listeners}

    @property
    def listeners(self) -> dict[str, int]:
        """Return dictionary with events and the number of listeners."""
        return run_callback_threadsafe(self._hass.loop, self.async_listeners).result()

    def fire(
        self,
        event_type: str,
        event_data: dict | None = None,
        origin: EventOrigin = EventOrigin.local,
        context: Context | None = None,
    ) -> None:
        """Fire an event.

        Thread-safe wrapper around async_fire.
        """
        self._hass.loop.call_soon_threadsafe(
            self.async_fire, event_type, event_data, origin, context
        )

    @callback
    def async_fire(
        self,
        event_type: str,
        event_data: dict[str, Any] | None = None,
        origin: EventOrigin = EventOrigin.local,
        context: Context | None = None,
        time_fired: datetime.datetime | None = None,
    ) -> None:
        """Fire an event.

        This method must be run in the event loop.

        Raises MaxLengthExceeded if event_type is too long.
        """
        if len(event_type) > MAX_LENGTH_EVENT_EVENT_TYPE:
            raise MaxLengthExceeded(
                event_type, "event_type", MAX_LENGTH_EVENT_EVENT_TYPE
            )
        listeners = self._listeners.get(event_type, [])
        # EVENT_HOMEASSISTANT_CLOSE should go only to its own listeners
        match_all_listeners = self._listeners.get(MATCH_ALL)
        if match_all_listeners is not None and event_type != EVENT_HOMEASSISTANT_CLOSE:
            listeners = match_all_listeners + listeners
        event = Event(event_type, event_data, origin, time_fired, context)
        # EVENT_TIME_CHANGED is too frequent to be worth logging.
        if event_type != EVENT_TIME_CHANGED:
            _LOGGER.debug("Bus:Handling %s", event)
        if not listeners:
            return
        for job, event_filter in listeners:
            if event_filter is not None:
                try:
                    if not event_filter(event):
                        continue
                except Exception:  # pylint: disable=broad-except
                    # A broken filter must not stop delivery to the
                    # remaining listeners.
                    _LOGGER.exception("Error in event filter")
                    continue
            self._hass.async_add_hass_job(job, event)

    def listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
        """Listen for all events or events of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.
        """
        async_remove_listener = run_callback_threadsafe(
            self._hass.loop, self.async_listen, event_type, listener
        ).result()

        def remove_listener() -> None:
            """Remove the listener."""
            run_callback_threadsafe(self._hass.loop, async_remove_listener).result()

        return remove_listener

    @callback
    def async_listen(
        self,
        event_type: str,
        listener: Callable,
        event_filter: Callable | None = None,
    ) -> CALLBACK_TYPE:
        """Listen for all events or events of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        An optional event_filter, which must be a callable decorated with
        @callback that returns a boolean value, determines if the
        listener callable should run.

        This method must be run in the event loop.
        """
        if event_filter is not None and not is_callback(event_filter):
            raise HomeAssistantError(f"Event filter {event_filter} is not a callback")
        return self._async_listen_filterable_job(
            event_type, (HassJob(listener), event_filter)
        )

    @callback
    def _async_listen_filterable_job(
        self, event_type: str, filterable_job: tuple[HassJob, Callable | None]
    ) -> CALLBACK_TYPE:
        # Register the (job, filter) pair and hand back an unsubscribe
        # closure bound to that exact pair.
        self._listeners.setdefault(event_type, []).append(filterable_job)

        def remove_listener() -> None:
            """Remove the listener."""
            self._async_remove_listener(event_type, filterable_job)

        return remove_listener

    def listen_once(
        self, event_type: str, listener: Callable[[Event], None]
    ) -> CALLBACK_TYPE:
        """Listen once for event of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        Returns function to unsubscribe the listener.
        """
        async_remove_listener = run_callback_threadsafe(
            self._hass.loop, self.async_listen_once, event_type, listener
        ).result()

        def remove_listener() -> None:
            """Remove the listener."""
            run_callback_threadsafe(self._hass.loop, async_remove_listener).result()

        return remove_listener

    @callback
    def async_listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
        """Listen once for event of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        Returns registered listener that can be used with remove_listener.

        This method must be run in the event loop.
        """
        filterable_job: tuple[HassJob, Callable | None] | None = None

        @callback
        def _onetime_listener(event: Event) -> None:
            """Remove listener from event bus and then fire listener."""
            nonlocal filterable_job
            if hasattr(_onetime_listener, "run"):
                return
            # Set variable so that we will never run twice.
            # Because the event bus loop might have async_fire queued multiple
            # times, its possible this listener may already be lined up
            # multiple times as well.
            # This will make sure the second time it does nothing.
            setattr(_onetime_listener, "run", True)
            assert filterable_job is not None
            self._async_remove_listener(event_type, filterable_job)
            self._hass.async_run_job(listener, event)

        filterable_job = (HassJob(_onetime_listener), None)
        return self._async_listen_filterable_job(event_type, filterable_job)

    @callback
    def _async_remove_listener(
        self, event_type: str, filterable_job: tuple[HassJob, Callable | None]
    ) -> None:
        """Remove a listener of a specific event_type.

        This method must be run in the event loop.
        """
        try:
            self._listeners[event_type].remove(filterable_job)
            # delete event_type list if empty
            if not self._listeners[event_type]:
                self._listeners.pop(event_type)
        except (KeyError, ValueError):
            # KeyError if no listener list exists for event_type;
            # ValueError if listener did not exist within event_type.
            _LOGGER.exception(
                "Unable to remove unknown job listener %s", filterable_job
            )
class State:
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    context: Context in which it was created
    domain: Domain of this state.
    object_id: Object id of this state.
    """

    __slots__ = [
        "entity_id",
        "state",
        "attributes",
        "last_changed",
        "last_updated",
        "context",
        "domain",
        "object_id",
        "_as_dict",
    ]

    def __init__(
        self,
        entity_id: str,
        state: str,
        attributes: Mapping[str, Any] | None = None,
        last_changed: datetime.datetime | None = None,
        last_updated: datetime.datetime | None = None,
        context: Context | None = None,
        validate_entity_id: bool | None = True,
    ) -> None:
        """Initialize a new state.

        Raises InvalidEntityFormatError when validation is requested and
        the entity_id is malformed, and InvalidStateError for an
        over-long state value.
        """
        state = str(state)
        if validate_entity_id and not valid_entity_id(entity_id):
            raise InvalidEntityFormatError(
                f"Invalid entity id encountered: {entity_id}. "
                "Format should be <domain>.<object_id>"
            )
        if not valid_state(state):
            raise InvalidStateError(
                f"Invalid state encountered for entity ID: {entity_id}. "
                "State max length is 255 characters."
            )
        self.entity_id = entity_id.lower()
        self.state = state
        # MappingProxyType keeps the attributes read-only for consumers.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt_util.utcnow()
        # When the state value itself did not change, callers pass the
        # previous last_changed; otherwise it equals last_updated.
        self.last_changed = last_changed or self.last_updated
        self.context = context or Context()
        self.domain, self.object_id = split_entity_id(self.entity_id)
        # Cached result of as_dict(); built lazily on first access.
        self._as_dict: dict[str, Collection[Any]] | None = None

    @property
    def name(self) -> str:
        """Name of this state."""
        # Fall back to a prettified object_id when no friendly name is set.
        return self.attributes.get(ATTR_FRIENDLY_NAME) or self.object_id.replace(
            "_", " "
        )

    def as_dict(self) -> dict:
        """Return a dict representation of the State.

        Async friendly.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        if not self._as_dict:
            last_changed_isoformat = self.last_changed.isoformat()
            # Reuse the formatted string when both timestamps are equal.
            if self.last_changed == self.last_updated:
                last_updated_isoformat = last_changed_isoformat
            else:
                last_updated_isoformat = self.last_updated.isoformat()
            self._as_dict = {
                "entity_id": self.entity_id,
                "state": self.state,
                "attributes": dict(self.attributes),
                "last_changed": last_changed_isoformat,
                "last_updated": last_updated_isoformat,
                "context": self.context.as_dict(),
            }
        return self._as_dict

    @classmethod
    def from_dict(cls, json_dict: dict) -> Any:
        """Initialize a state from a dict.

        Async friendly.

        Ensures: state == State.from_json_dict(state.to_json_dict())

        Returns None when the dict is missing the required keys.
        """
        if not (json_dict and "entity_id" in json_dict and "state" in json_dict):
            return None
        last_changed = json_dict.get("last_changed")
        if isinstance(last_changed, str):
            last_changed = dt_util.parse_datetime(last_changed)
        last_updated = json_dict.get("last_updated")
        if isinstance(last_updated, str):
            last_updated = dt_util.parse_datetime(last_updated)
        context = json_dict.get("context")
        if context:
            context = Context(id=context.get("id"), user_id=context.get("user_id"))
        return cls(
            json_dict["entity_id"],
            json_dict["state"],
            json_dict.get("attributes"),
            last_changed,
            last_updated,
            context,
        )

    def __eq__(self, other: Any) -> bool:
        """Return the comparison of the state."""
        # last_changed/last_updated are deliberately not compared here.
        return (  # type: ignore
            self.__class__ == other.__class__
            and self.entity_id == other.entity_id
            and self.state == other.state
            and self.attributes == other.attributes
            and self.context == other.context
        )

    def __repr__(self) -> str:
        """Return the representation of the states."""
        attrs = f"; {util.repr_helper(self.attributes)}" if self.attributes else ""
        return (
            f"<state {self.entity_id}={self.state}{attrs}"
            f" @ {dt_util.as_local(self.last_changed).isoformat()}>"
        )
class StateMachine:
    """Helper class that tracks the state of different entities."""

    def __init__(self, bus: EventBus, loop: asyncio.events.AbstractEventLoop) -> None:
        """Initialize state machine."""
        # entity_id -> current State object.
        self._states: dict[str, State] = {}
        # entity_ids claimed via async_reserve but not yet set.
        self._reservations: set[str] = set()
        self._bus = bus
        self._loop = loop

    def entity_ids(self, domain_filter: str | None = None) -> list[str]:
        """List of entity ids that are being tracked.

        Thread-safe wrapper around async_entity_ids.
        """
        future = run_callback_threadsafe(
            self._loop, self.async_entity_ids, domain_filter
        )
        return future.result()

    @callback
    def async_entity_ids(
        self, domain_filter: str | Iterable | None = None
    ) -> list[str]:
        """List of entity ids that are being tracked.

        This method must be run in the event loop.
        """
        if domain_filter is None:
            return list(self._states)
        if isinstance(domain_filter, str):
            domain_filter = (domain_filter.lower(),)
        return [
            state.entity_id
            for state in self._states.values()
            if state.domain in domain_filter
        ]

    @callback
    def async_entity_ids_count(
        self, domain_filter: str | Iterable | None = None
    ) -> int:
        """Count the entity ids that are being tracked.

        This method must be run in the event loop.
        """
        if domain_filter is None:
            return len(self._states)
        if isinstance(domain_filter, str):
            domain_filter = (domain_filter.lower(),)
        return len(
            [None for state in self._states.values() if state.domain in domain_filter]
        )

    def all(self, domain_filter: str | Iterable | None = None) -> list[State]:
        """Create a list of all states."""
        return run_callback_threadsafe(
            self._loop, self.async_all, domain_filter
        ).result()

    @callback
    def async_all(self, domain_filter: str | Iterable | None = None) -> list[State]:
        """Create a list of all states matching the filter.

        This method must be run in the event loop.
        """
        if domain_filter is None:
            return list(self._states.values())
        if isinstance(domain_filter, str):
            domain_filter = (domain_filter.lower(),)
        return [
            state for state in self._states.values() if state.domain in domain_filter
        ]

    def get(self, entity_id: str) -> State | None:
        """Retrieve state of entity_id or None if not found.

        Async friendly.
        """
        return self._states.get(entity_id.lower())

    def is_state(self, entity_id: str, state: str) -> bool:
        """Test if entity exists and is in specified state.

        Async friendly.
        """
        state_obj = self.get(entity_id)
        return state_obj is not None and state_obj.state == state

    def remove(self, entity_id: str) -> bool:
        """Remove the state of an entity.

        Returns boolean to indicate if an entity was removed.
        """
        return run_callback_threadsafe(
            self._loop, self.async_remove, entity_id
        ).result()

    @callback
    def async_remove(self, entity_id: str, context: Context | None = None) -> bool:
        """Remove the state of an entity.

        Returns boolean to indicate if an entity was removed.

        This method must be run in the event loop.
        """
        entity_id = entity_id.lower()
        old_state = self._states.pop(entity_id, None)
        # Removal also releases any reservation held for the id.
        if entity_id in self._reservations:
            self._reservations.remove(entity_id)
        if old_state is None:
            return False
        # new_state=None signals removal to state-change listeners.
        self._bus.async_fire(
            EVENT_STATE_CHANGED,
            {"entity_id": entity_id, "old_state": old_state, "new_state": None},
            EventOrigin.local,
            context=context,
        )
        return True

    def set(
        self,
        entity_id: str,
        new_state: str,
        attributes: Mapping[str, Any] | None = None,
        force_update: bool = False,
        context: Context | None = None,
    ) -> None:
        """Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.
        """
        run_callback_threadsafe(
            self._loop,
            self.async_set,
            entity_id,
            new_state,
            attributes,
            force_update,
            context,
        ).result()

    @callback
    def async_reserve(self, entity_id: str) -> None:
        """Reserve a state in the state machine for an entity being added.

        This must not fire an event when the state is reserved.

        This avoids a race condition where multiple entities with the same
        entity_id are added.
        """
        entity_id = entity_id.lower()
        if entity_id in self._states or entity_id in self._reservations:
            raise HomeAssistantError(
                "async_reserve must not be called once the state is in the state machine."
            )
        self._reservations.add(entity_id)

    @callback
    def async_available(self, entity_id: str) -> bool:
        """Check to see if an entity_id is available to be used."""
        entity_id = entity_id.lower()
        return entity_id not in self._states and entity_id not in self._reservations

    @callback
    def async_set(
        self,
        entity_id: str,
        new_state: str,
        attributes: Mapping[str, Any] | None = None,
        force_update: bool = False,
        context: Context | None = None,
    ) -> None:
        """Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.

        This method must be run in the event loop.
        """
        entity_id = entity_id.lower()
        new_state = str(new_state)
        attributes = attributes or {}
        old_state = self._states.get(entity_id)
        if old_state is None:
            same_state = False
            same_attr = False
            last_changed = None
        else:
            same_state = old_state.state == new_state and not force_update
            same_attr = old_state.attributes == MappingProxyType(attributes)
            # Keep the previous last_changed when the value is unchanged.
            last_changed = old_state.last_changed if same_state else None
        if same_state and same_attr:
            # Nothing changed: no new State object, no event.
            return
        if context is None:
            context = Context()
        now = dt_util.utcnow()
        state = State(
            entity_id,
            new_state,
            attributes,
            last_changed,
            now,
            context,
            # Only validate the entity_id for brand-new entities.
            old_state is None,
        )
        self._states[entity_id] = state
        self._bus.async_fire(
            EVENT_STATE_CHANGED,
            {"entity_id": entity_id, "old_state": old_state, "new_state": state},
            EventOrigin.local,
            context,
            time_fired=now,
        )
class Service:
    """Representation of a callable service."""

    __slots__ = ["job", "schema"]

    def __init__(
        self,
        func: Callable,
        schema: vol.Schema | None,
        context: Context | None = None,
    ) -> None:
        """Initialize a service.

        func: handler invoked when the service is called.
        schema: optional voluptuous schema used to validate call data.
        context: accepted for signature compatibility but not stored.
        """
        self.job = HassJob(func)
        self.schema = schema
class ServiceCall:
    """Representation of a call to a service."""

    __slots__ = ["domain", "service", "data", "context"]

    def __init__(
        self,
        domain: str,
        service: str,
        data: dict | None = None,
        context: Context | None = None,
    ) -> None:
        """Initialize a service call.

        domain/service are normalized to lower case; data is exposed
        read-only via MappingProxyType.
        """
        self.domain = domain.lower()
        self.service = service.lower()
        self.data = MappingProxyType(data or {})
        self.context = context or Context()

    def __repr__(self) -> str:
        """Return the representation of the service."""
        if self.data:
            return (
                f"<ServiceCall {self.domain}.{self.service} "
                f"(c:{self.context.id}): {util.repr_helper(self.data)}>"
            )
        return f"<ServiceCall {self.domain}.{self.service} (c:{self.context.id})>"
class ServiceRegistry:
    """Offer the services over the eventbus."""

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize a service registry."""
        # domain -> service name -> Service object.
        self._services: dict[str, dict[str, Service]] = {}
        self._hass = hass

    @property
    def services(self) -> dict[str, dict[str, Service]]:
        """Return dictionary with per domain a list of available services."""
        return run_callback_threadsafe(self._hass.loop, self.async_services).result()

    @callback
    def async_services(self) -> dict[str, dict[str, Service]]:
        """Return dictionary with per domain a list of available services.

        This method must be run in the event loop.
        """
        # Shallow-copy the inner dicts so callers cannot mutate the registry.
        return {domain: self._services[domain].copy() for domain in self._services}

    def has_service(self, domain: str, service: str) -> bool:
        """Test if specified service exists.

        Async friendly.
        """
        return service.lower() in self._services.get(domain.lower(), [])

    def register(
        self,
        domain: str,
        service: str,
        service_func: Callable,
        schema: vol.Schema | None = None,
    ) -> None:
        """
        Register a service.

        Schema is called to coerce and validate the service data.
        """
        run_callback_threadsafe(
            self._hass.loop, self.async_register, domain, service, service_func, schema
        ).result()

    @callback
    def async_register(
        self,
        domain: str,
        service: str,
        service_func: Callable,
        schema: vol.Schema | None = None,
    ) -> None:
        """
        Register a service.

        Schema is called to coerce and validate the service data.

        This method must be run in the event loop.
        """
        domain = domain.lower()
        service = service.lower()
        service_obj = Service(service_func, schema)
        # Re-registering a service silently replaces the old handler.
        if domain in self._services:
            self._services[domain][service] = service_obj
        else:
            self._services[domain] = {service: service_obj}
        self._hass.bus.async_fire(
            EVENT_SERVICE_REGISTERED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
        )

    def remove(self, domain: str, service: str) -> None:
        """Remove a registered service from service handler."""
        run_callback_threadsafe(
            self._hass.loop, self.async_remove, domain, service
        ).result()

    @callback
    def async_remove(self, domain: str, service: str) -> None:
        """Remove a registered service from service handler.

        This method must be run in the event loop.
        """
        domain = domain.lower()
        service = service.lower()
        if service not in self._services.get(domain, {}):
            _LOGGER.warning("Unable to remove unknown service %s/%s", domain, service)
            return
        self._services[domain].pop(service)
        # Drop the domain entry entirely once its last service is gone.
        if not self._services[domain]:
            self._services.pop(domain)
        self._hass.bus.async_fire(
            EVENT_SERVICE_REMOVED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
        )

    def call(
        self,
        domain: str,
        service: str,
        service_data: dict | None = None,
        blocking: bool = False,
        context: Context | None = None,
        limit: float | None = SERVICE_CALL_LIMIT,
        target: dict | None = None,
    ) -> bool | None:
        """
        Call a service.

        See description of async_call for details.
        """
        return asyncio.run_coroutine_threadsafe(
            self.async_call(
                domain, service, service_data, blocking, context, limit, target
            ),
            self._hass.loop,
        ).result()

    async def async_call(
        self,
        domain: str,
        service: str,
        service_data: dict | None = None,
        blocking: bool = False,
        context: Context | None = None,
        limit: float | None = SERVICE_CALL_LIMIT,
        target: dict | None = None,
    ) -> bool | None:
        """
        Call a service.

        Specify blocking=True to wait until service is executed.
        Waits a maximum of limit, which may be None for no timeout.

        If blocking = True, will return boolean if service executed
        successfully within limit.

        This method will fire an event to indicate the service has been called.

        Because the service is sent as an event you are not allowed to use
        the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.

        This method is a coroutine.

        Raises ServiceNotFound if the domain/service pair is unknown and
        re-raises vol.Invalid when schema validation fails.
        """
        domain = domain.lower()
        service = service.lower()
        context = context or Context()
        service_data = service_data or {}
        try:
            handler = self._services[domain][service]
        except KeyError:
            raise ServiceNotFound(domain, service) from None
        if target:
            # NOTE(review): this mutates the caller-supplied service_data
            # dict in place — confirm callers do not rely on it staying
            # unchanged.
            service_data.update(target)
        if handler.schema:
            try:
                processed_data = handler.schema(service_data)
            except vol.Invalid:
                _LOGGER.debug(
                    "Invalid data for service call %s.%s: %s",
                    domain,
                    service,
                    service_data,
                )
                raise
        else:
            processed_data = service_data
        service_call = ServiceCall(domain, service, processed_data, context)
        self._hass.bus.async_fire(
            EVENT_CALL_SERVICE,
            {
                ATTR_DOMAIN: domain.lower(),
                ATTR_SERVICE: service.lower(),
                ATTR_SERVICE_DATA: service_data,
            },
            context=context,
        )
        coro = self._execute_service(handler, service_call)
        if not blocking:
            self._run_service_in_background(coro, service_call)
            return None
        task = self._hass.async_create_task(coro)
        try:
            await asyncio.wait({task}, timeout=limit)
        except asyncio.CancelledError:
            # Task calling us was cancelled, so cancel service call task, and wait for
            # it to be cancelled, within reason, before leaving.
            _LOGGER.debug("Service call was cancelled: %s", service_call)
            task.cancel()
            await asyncio.wait({task}, timeout=SERVICE_CALL_LIMIT)
            raise
        if task.cancelled():
            # Service call task was cancelled some other way, such as during shutdown.
            _LOGGER.debug("Service was cancelled: %s", service_call)
            raise asyncio.CancelledError
        if task.done():
            # Propagate any exceptions that might have happened during service call.
            task.result()
            # Service call completed successfully!
            return True
        # Service call task did not complete before timeout expired.
        # Let it keep running in background.
        self._run_service_in_background(task, service_call)
        _LOGGER.debug("Service did not complete before timeout: %s", service_call)
        return False

    def _run_service_in_background(
        self, coro_or_task: Coroutine | asyncio.Task, service_call: ServiceCall
    ) -> None:
        """Run service call in background, catching and logging any exceptions."""

        async def catch_exceptions() -> None:
            try:
                await coro_or_task
            except Unauthorized:
                _LOGGER.warning(
                    "Unauthorized service called %s/%s",
                    service_call.domain,
                    service_call.service,
                )
            except asyncio.CancelledError:
                _LOGGER.debug("Service was cancelled: %s", service_call)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Error executing service: %s", service_call)

        self._hass.async_create_task(catch_exceptions())

    async def _execute_service(
        self, handler: Service, service_call: ServiceCall
    ) -> None:
        """Execute a service.

        Dispatches on the handler's job type: coroutine functions are
        awaited, callbacks run inline, blocking functions go to the
        executor.
        """
        if handler.job.job_type == HassJobType.Coroutinefunction:
            await handler.job.target(service_call)
        elif handler.job.job_type == HassJobType.Callback:
            handler.job.target(service_call)
        else:
            await self._hass.async_add_executor_job(handler.job.target, service_call)
class Config:
"""Configuration settings for Home Assistant."""
    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize a new config object."""
        self.hass = hass

        # Location of the Home Assistant instance.
        self.latitude: float = 0
        self.longitude: float = 0
        self.elevation: int = 0
        self.location_name: str = "Home"
        # IANA time zone name, e.g. "UTC".
        self.time_zone: str = "UTC"
        self.units: UnitSystem = METRIC_SYSTEM
        self.internal_url: str | None = None
        self.external_url: str | None = None
        # Where the configuration came from (e.g. default vs storage).
        self.config_source: str = "default"

        # If True, pip install is skipped for requirements on startup
        self.skip_pip: bool = False

        # List of loaded components
        self.components: set[str] = set()

        # API (HTTP) server configuration, see components.http.ApiConfig
        self.api: Any | None = None

        # Directory that holds the configuration
        self.config_dir: str | None = None

        # List of allowed external dirs to access
        self.allowlist_external_dirs: set[str] = set()

        # List of allowed external URLs that integrations may use
        self.allowlist_external_urls: set[str] = set()

        # Dictionary of Media folders that integrations may use
        self.media_dirs: dict[str, str] = {}

        # If Home Assistant is running in safe mode
        self.safe_mode: bool = False

        # Use legacy template behavior
        self.legacy_templates: bool = False
def distance(self, lat: float, lon: float) -> float | None:
"""Calculate distance from Home Assistant.
Async friendly.
"""
return self.units.length(
location.distance(self.latitude, self.longitude, lat, lon), LENGTH_METERS
)
def path(self, *path: str) -> str:
"""Generate path to the file within the configuration directory.
Async friendly.
"""
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
def is_allowed_external_url(self, url: str) -> bool:
"""Check if an external URL is allowed."""
parsed_url = f"{str(yarl.URL(url))}/"
return any(
allowed
for allowed in self.allowlist_external_urls
if parsed_url.startswith(allowed)
)
def is_allowed_path(self, path: str) -> bool:
"""Check if the path is valid for access from outside."""
assert path is not None
thepath = pathlib.Path(path)
try:
# The file path does not have to exist (it's parent should)
if thepath.exists():
thepath = thepath.resolve()
else:
thepath = thepath.parent.resolve()
except (FileNotFoundError, RuntimeError, PermissionError):
return False
for allowed_path in self.allowlist_external_dirs:
try:
thepath.relative_to(allowed_path)
return True
except ValueError:
pass
return False
def as_dict(self) -> dict:
"""Create a dictionary representation of the configuration.
Async friendly.
"""
return {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.as_dict(),
"location_name": self.location_name,
"time_zone": self.time_zone,
"components": self.components,
"config_dir": self.config_dir,
# legacy, backwards compat
"whitelist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_urls": self.allowlist_external_urls,
"version": __version__,
"config_source": self.config_source,
"safe_mode": self.safe_mode,
"state": self.hass.state.value,
"external_url": self.external_url,
"internal_url": self.internal_url,
}
def set_time_zone(self, time_zone_str: str) -> None:
"""Help to set the time zone."""
time_zone = dt_util.get_time_zone(time_zone_str)
if time_zone:
self.time_zone = time_zone_str
dt_util.set_default_time_zone(time_zone)
else:
raise ValueError(f"Received invalid time zone {time_zone_str}")
@callback
def _update(
self,
*,
source: str,
latitude: float | None = None,
longitude: float | None = None,
elevation: int | None = None,
unit_system: str | None = None,
location_name: str | None = None,
time_zone: str | None = None,
# pylint: disable=dangerous-default-value # _UNDEFs not modified
external_url: str | dict | None = _UNDEF,
internal_url: str | dict | None = _UNDEF,
) -> None:
"""Update the configuration from a dictionary."""
self.config_source = source
if latitude is not None:
self.latitude = latitude
if longitude is not None:
self.longitude = longitude
if elevation is not None:
self.elevation = elevation
if unit_system is not None:
if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
self.units = IMPERIAL_SYSTEM
else:
self.units = METRIC_SYSTEM
if location_name is not None:
self.location_name = location_name
if time_zone is not None:
self.set_time_zone(time_zone)
if external_url is not _UNDEF:
self.external_url = cast(Optional[str], external_url)
if internal_url is not _UNDEF:
self.internal_url = cast(Optional[str], internal_url)
async def async_update(self, **kwargs: Any) -> None:
"""Update the configuration from a dictionary."""
self._update(source=SOURCE_STORAGE, **kwargs)
await self.async_store()
self.hass.bus.async_fire(EVENT_CORE_CONFIG_UPDATE, kwargs)
async def async_load(self) -> None:
"""Load [homeassistant] core config."""
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
data = await store.async_load()
if data:
self._update(
source=SOURCE_STORAGE,
latitude=data.get("latitude"),
longitude=data.get("longitude"),
elevation=data.get("elevation"),
unit_system=data.get("unit_system"),
location_name=data.get("location_name"),
time_zone=data.get("time_zone"),
external_url=data.get("external_url", _UNDEF),
internal_url=data.get("internal_url", _UNDEF),
)
async def async_store(self) -> None:
"""Store [homeassistant] core config."""
data = {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.name,
"location_name": self.location_name,
"time_zone": self.time_zone,
"external_url": self.external_url,
"internal_url": self.internal_url,
}
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
await store.async_save(data)
def _async_create_timer(hass: HomeAssistant) -> None:
    """Create a timer that will start on HOMEASSISTANT_START.

    Fires EVENT_TIME_CHANGED once per second, aligned to whole-second
    boundaries, and EVENT_TIMER_OUT_OF_SYNC when a tick was missed.
    """
    # Handle of the currently scheduled call_later, kept so stop_timer can
    # cancel it on shutdown.
    handle = None
    # All time events fired by this timer share a single context.
    timer_context = Context()
    def schedule_tick(now: datetime.datetime) -> None:
        """Schedule a timer tick when the next second rolls around."""
        nonlocal handle
        # Sleep just long enough to land on the next whole second.
        slp_seconds = 1 - (now.microsecond / 10 ** 6)
        # target (monotonic time) lets fire_time_event measure lateness.
        target = monotonic() + slp_seconds
        handle = hass.loop.call_later(slp_seconds, fire_time_event, target)
    @callback
    def fire_time_event(target: float) -> None:
        """Fire next time event."""
        now = dt_util.utcnow()
        hass.bus.async_fire(
            EVENT_TIME_CHANGED, {ATTR_NOW: now}, time_fired=now, context=timer_context
        )
        # If we are more than a second late, a tick was missed
        late = monotonic() - target
        if late > 1:
            hass.bus.async_fire(
                EVENT_TIMER_OUT_OF_SYNC,
                {ATTR_SECONDS: late},
                time_fired=now,
                context=timer_context,
            )
        # Reschedule so ticks stay aligned to second boundaries.
        schedule_tick(now)
    @callback
    def stop_timer(_: Event) -> None:
        """Stop the timer."""
        if handle is not None:
            handle.cancel()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
    _LOGGER.info("Timer:starting")
    schedule_tick(dt_util.utcnow())
|
{
"content_hash": "238d7360309d8b71e612b1457d61b73a",
"timestamp": "",
"source": "github",
"line_count": 1790,
"max_line_length": 101,
"avg_line_length": 33.12122905027933,
"alnum_prop": 0.5878523116366151,
"repo_name": "kennedyshead/home-assistant",
"id": "b9bf97e7e6c8209bc77daf02dc40dd6b3df1a3cd",
"size": "59287",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
"""
These are the core functions/classes for interacting with NetworkX.
"""
from itertools import chain
import networkx as nx
def reset_index(graph):
    """
    Relabel every node with a consecutive integer id starting at zero.
    This clobbers your nodes, best protect them.
    :param graph: networkx.Graph
    :returns: networkx.Graph
    """
    # Map each node to its position in the current iteration order.
    relabel_map = {node: idx for idx, node in enumerate(graph.nodes())}
    return nx.relabel_nodes(graph, relabel_map)
def match(node_type_seq, edge_type_seq, graph, node_alias=None,
          node_type_attr="type", edge_type_attr="type"):
    """
    Executes traversals to perform initial match on pattern.
    Every node whose type matches the first element of the sequence (or any
    node if that element is empty) is used as a traversal start point.
    :param graph: networkx.Graph
    :returns: Iterable of lists. The matched paths.
    """
    first_type = node_type_seq[0]
    collected = [
        traverse(node, node_type_seq[1:], edge_type_seq, graph,
                 node_alias, node_type_attr, edge_type_attr)
        for node, attrs in graph.nodes(data=True)
        if attrs[node_type_attr] == first_type or not first_type
    ]
    # Flatten the per-start-node path lists into one lazy stream.
    return chain.from_iterable(collected)
def project(source, target, graph, method="jaccard", params=None, attrs=None,
            node_type_attr="type", edge_type_attr="type"):
    """
    Executes graph "PROJECT" projection.
    Computes a similarity weight between source and target over their shared
    neighbors of the types listed in params, then connects source and target
    with an edge carrying that weight.
    :param source: Int. Source node for transformation.
    :param target: Int. Target node for transformation.
    :param method: Str. "jaccard" or "newman"; any other value skips the
        weight computation entirely.
    :param params: List. Node types counted as neighbors for similarity.
    :param attrs: Dict. Attrs to be set during transformation.
    :param graph: networkx.Graph. Graph of subgraph to transform.
    :returns: networkx.Graph. A projected copy of the wrapped graph
              or its subgraph.
    """
    if params is None:
        params = []
    if attrs is None:
        attrs = {}
    if method in ["jaccard", "newman"]:
        snbrs = {node for node in graph[source].keys()
                 if graph.node[node][node_type_attr] in params}
        tnbrs = {node for node in graph[target].keys()
                 if graph.node[node][node_type_attr] in params}
        intersect = snbrs & tnbrs
        if method == "jaccard":
            union = snbrs | tnbrs
            # Guard against ZeroDivisionError when neither endpoint has any
            # neighbor of the requested types: similarity is 0 by convention.
            weight = float(len(intersect)) / len(union) if union else 0.0
        elif method == "newman":
            weight = sum([1.0 / (len(graph[n]) - 1) for n in intersect
                          if len(graph[n]) > 1])
        attrs["weight"] = weight
    if graph.has_edge(source, target):
        # Merge the new attrs into the existing edge, keeping reserved keys.
        edge_attrs = graph[source][target]
        merged_attrs = merge_attrs(attrs, edge_attrs,
                                   [edge_type_attr, "weight", "label"])
        graph.adj[source][target] = merged_attrs
        graph.adj[target][source] = merged_attrs
    else:
        graph.add_edge(source, target, attrs)
    return graph
def transfer(source, target, graph, method="edges", params=None, attrs=None,
             node_type_attr="type", edge_type_attr="type", **kwargs):
    """
    Execute a graph "TRANSFER" projection.
    Copies the edges between source and its typed neighbors onto target,
    then merges the given attrs into the target node.
    :param source: Int. Source node for transformation.
    :param target: Int. Target node for transformation.
    :param attrs: Dict. Attrs to be set during transformation.
    :param graph: networkx.Graph. Graph of subgraph to transform.
    :returns: networkx.Graph. A projected copy of the wrapped graph
              or its subgraph.
    """
    params = params if params is not None else []
    attrs = attrs if attrs is not None else {}
    if method == "edges":
        # Neighbors of source whose node type is one of the requested params.
        typed_nbrs = {nbr: eattrs for (nbr, eattrs) in graph[source].items()
                      if graph.node[nbr][node_type_attr] in params}
        new_edges = [(target, nbr, eattrs)
                     for (nbr, eattrs) in typed_nbrs.items()]
        graph = _add_edges_from(graph, new_edges)
    # Fold the transformation attrs into the target node's attributes.
    merged = merge_attrs(attrs, graph.node[target],
                         [node_type_attr, "label", "role"])
    graph.node[target] = merged
    return graph
def combine(source, target, graph, node_id="", attrs=None,
            node_type_attr="type", edge_type_attr="type"):
    """
    Executes graph "COMBINE" projection.
    Adds a new node of a combined type and connects it to the union of the
    neighbors of source and target (excluding nodes of the combined type).
    :param source: Int. Source node for transformation.
    :param target: Int. Target node for transformation.
    :param attrs: Dict. Attrs to be set during transformation.
    :param graph: networkx.Graph. Graph of subgraph to transform.
    :param node_id: Int. Id for new node; autoassigned when falsy.
    :returns: networkx.Graph. A projected copy of the wrapped graph
              or its subgraph.
    """
    if attrs is None:
        attrs = {}
    if not node_id:
        try:
            # Autoassign one past the current maximum so the new combined
            # node cannot clobber an existing node (previously max() alone
            # was used, which reused an existing node id). Assumes integer
            # node ids — see reset_index.
            node_id = max(graph.nodes()) + 1
        except ValueError:
            # max() raises ValueError on an empty graph; narrow from the
            # previous bare except so unrelated errors are not swallowed.
            raise Exception("Please specify a kwarg 'node_id'")
    node_type = attrs.get(node_type_attr, "")
    if not node_type:
        # Default combined type is "<source_type>_<target_type>".
        node_type = "{0}_{1}".format(
            graph.node[source][node_type_attr],
            graph.node[target][node_type_attr]
        )
    attrs[node_type_attr] = node_type
    graph.add_node(node_id, attrs)
    # Union of both endpoints' neighborhoods, minus nodes that already carry
    # the combined type.
    nbrs = dict(graph[source])
    nbrs.update(dict(graph[target]))
    nbrs = {k: v for (k, v) in nbrs.items()
            if graph.node[k][node_type_attr] != node_type}
    edges = zip([node_id] * len(nbrs), nbrs,
                [v for (_, v) in nbrs.items()])
    graph = _add_edges_from(graph, edges)
    return graph
def traverse(start, node_type_seq, edge_type_seq, graph,
             node_alias=None, node_type_attr="type", edge_type_attr="type"):
    """
    This is a controlled depth, depth first traversal of a NetworkX
    graph and the core of this library. Criteria for searching depends
    on a start node and a sequence of types as designated by the node/edge
    type seq. It does not allow cycles or backtracking. Could be very memory
    inefficient in very dense graph with 3 + type queries.
    :param start: Integer. Starting point for the traversal.
    :param node_type_seq: List of strings. Derived from the match pattern.
    :param edge_type_seq: List of strings. Derived from the match pattern.
    :param graph: networkx.Graph
    :returns: List of lists. All matched paths.
    """
    # Initialize a stack to keep
    # track of traversal progress.
    stack = [start]
    # Store all valid paths based on type sequence.
    paths = []
    # Keep track of traversal moves to avoid cycles.
    visited_from = {}
    # The traversal will begin at the designated start point.
    current = start
    # Track depth from start node to watch for successful sequence match.
    depth = 0
    # This is the len of a successful sequence.
    max_depth = len(node_type_seq)
    # When the stack runs out, all candidate nodes have been visited.
    while len(stack) > 0:
        # Traverse!
        if depth < max_depth:
            # Candidate neighbors, excluding self-loops.
            nbrs = set(graph[current]) - set([current])
            for nbr in nbrs:
                edge_type = graph[current][nbr].get(
                    edge_type_attr,
                    None
                )
                attrs = graph.node[nbr]
                # Here check candidate node validity.
                # Make sure this path hasn"t been checked already.
                # Make sure it matches the type sequence.
                # Make sure it"s not backtracking on same path.
                # An empty string in either type sequence acts as a wildcard.
                visited_from.setdefault(nbr, [])
                if (current not in visited_from[nbr] and
                        nbr not in stack and
                        (edge_type == edge_type_seq[depth] or
                         edge_type_seq[depth] == "") and
                        (attrs[node_type_attr] == node_type_seq[depth]
                         or node_type_seq[depth] == "")):
                    visited_from[nbr].append(current)
                    # Continue traversal at next depth.
                    current = nbr
                    stack.append(current)
                    depth += 1
                    break
            # If no valid nodes are available from
            # this position, backtrack.
            # NOTE: this is a for/else — it runs only when the for loop above
            # finished without hitting "break".
            else:
                stack.pop()
                if len(stack) > 0:
                    current = stack[-1]
                    depth -= 1
        # If max depth reached, store the
        # valid node sequence.
        else:
            path = list(stack)
            if node_alias:
                path = Record(path, node_alias)
            paths.append(path)
            # Backtrack and keep checking.
            stack.pop()
            current = stack[-1]
            depth -= 1
    return paths
def build_subgraph(paths, graph, records=False):
    """
    Takes the paths returned by match and builds a graph.
    :param paths: List of lists (or Records when records=True).
    :returns: networkx.Graph. Matched subgraph.
    """
    subgraph = nx.Graph()
    for path in paths:
        node_seq = path._list if records else path
        # Add each consecutive pair along the path with its original attrs.
        for u, v in _combine_paths(node_seq):
            subgraph.add_edge(u, v, graph[u][v])
    # Copy node attributes over from the source graph.
    for n in subgraph.nodes():
        subgraph.node[n] = dict(graph.node[n])
    return subgraph
def merge_attrs(new_attrs, old_attrs, reserved=()):
    """
    Merges attributes counting repeated attrs with dicts.
    String values become ``{value: count}`` dicts so repeated values across
    merges accumulate counts; dict values are treated as existing count
    dicts and summed key-wise. Values of other types are dropped.
    :param new_attrs: Dict.
    :param old_attrs: Dict.
    :param reserved: Iterable. Attributes that cannot have more than one
        value; the new value overwrites the old one. (Previously a mutable
        default list — changed to a tuple, membership tests are unaffected.)
    :returns: Dict.
    """
    attrs = {}
    for k, v in old_attrs.items():
        if k in reserved:
            attrs[k] = v
        elif isinstance(v, dict):
            # Copy so the count dict is never shared with the input.
            attrs[k] = dict(v)
        # `unicode` keeps Python 2 compatibility; on Python 3 the str check
        # short-circuits first for all text values.
        elif isinstance(v, str) or isinstance(v, unicode):
            attrs[k] = {v: 1}
    for k, v in new_attrs.items():
        if k in reserved:
            attrs[k] = v
        elif k in attrs:
            count_dict = attrs[k]
            if isinstance(v, dict):
                # Sum the incoming counts into the accumulated ones.
                for i, j in v.items():
                    count_dict.setdefault(i, 0)
                    count_dict[i] += j
            elif isinstance(v, str) or isinstance(v, unicode):
                count_dict.setdefault(v, 0)
                count_dict[v] += 1
            attrs[k] = count_dict
        else:
            if isinstance(v, dict):
                attrs[k] = dict(v)
            elif isinstance(v, str) or isinstance(v, unicode):
                attrs[k] = {v: 1}
    return attrs
class NXProjector(object):
    """
    Holds the info and methods necessary for performing the ETL actions on
    a networkx.Graph. It is not a wrapper, and does not store the actual
    graph, just operates on it.
    """
    def __init__(self, id_counter):
        """
        :param id_counter: Int. Used to handle combine ids.
        """
        self._id_counter = id_counter
        self._transformation = {}
        self._transformation_init()
    def transformation_wrapper(self, verb):
        """
        Wraps the transformation methods and adds them to the transformations
        dictionary.
        :param verb: Str. The ProjX verb associated with the wrapped
            function.
        """
        def wrapper(fn):
            self._transformation[verb] = fn
            # Return fn so the decorator follows the standard protocol and
            # the decorated name stays bound to the function (it was
            # previously rebound to None).
            return fn
        return wrapper
    def _get_transformation(self):
        """
        Return transformation for transformation property.
        :returns: Dict. A dict containing a mapping of verbs to transformation
            methods.
        """
        return self._transformation
    transformations = property(fget=_get_transformation)
    def _transformation_init(self):
        """
        A series of functions representing transformations. These are
        wrapped by the transformation wrapper and added to the transformations
        dict. Later during the parsing and execution phase these are called as
        pointers to the various graph transformation methods
        (transfer and project).
        """
        @self.transformation_wrapper("project")
        def execute_project(source, target, graph, attrs, node_type_attr,
                            edge_type_attr, **kwargs):
            method = kwargs.get("method", {})
            params = kwargs.get("params", [])
            return project(source, target, graph, method, params, attrs,
                           node_type_attr, edge_type_attr)
        @self.transformation_wrapper("transfer")
        def execute_transfer(source, target, graph, attrs, node_type_attr,
                             edge_type_attr, **kwargs):
            method = kwargs.get("method", {})
            params = kwargs.get("params", [])
            return transfer(source, target, graph, method, params, attrs,
                            node_type_attr, edge_type_attr)
        @self.transformation_wrapper("combine")
        def execute_combine(source, target, graph, attrs, node_type_attr,
                            edge_type_attr, **kwargs):
            # Each combine consumes a fresh id from the shared counter.
            self._id_counter += 1
            node_id = int(self._id_counter)
            return combine(source, target, graph, node_id, attrs,
                           node_type_attr, edge_type_attr)
class Record(object):
    """Path wrapper allowing lookup by position (int) or node alias (str)."""
    def __init__(self, path, alias):
        self._list = path
        # Map each alias to the node at the same position in the path.
        self._dict = {alias[i]: node for i, node in enumerate(path)}
    def __getitem__(self, item):
        if isinstance(item, str):
            return self._dict[item]
        if isinstance(item, int):
            return self._list[item]
        raise Exception("Bad index.")
def _add_edges_from(graph, edges, edge_type_attr="type"):
    """
    An alternative to the networkx.Graph.add_edges_from.
    Handles non-reserved attributes as sets.
    :param graph: networkx.Graph
    :param edges: List of tuples. Tuple contains two node ids Int and an
        attr Dict.
    """
    for src, tgt, new_attrs in edges:
        if not graph.has_edge(src, tgt):
            graph.add_edge(src, tgt, new_attrs)
            continue
        # Existing edge: accumulate attrs (keeping reserved keys) and write
        # the merged dict to both directions of the undirected adjacency.
        combined = merge_attrs(new_attrs, graph[src][tgt],
                               [edge_type_attr, "weight", "label"])
        graph.adj[src][tgt] = combined
        graph.adj[tgt][src] = combined
    return graph
def _combine_paths(path):
"""
Turn path list into edge list.
:param path: List. A list of nodes representing a path.
:returns: List. A list of edge tuples.
"""
edges = []
for i, node in enumerate(path[1:]):
edges.append((path[i], node))
return edges
|
{
"content_hash": "8a5f3dbf4e6ae1764c73dd3ae95518f8",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 78,
"avg_line_length": 36.2035175879397,
"alnum_prop": 0.5742244430564231,
"repo_name": "davebshow/projx",
"id": "cf489f598fe8551839d35775b4e0f5184ad69d7c",
"size": "14433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projx/nxprojx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27292"
},
{
"name": "HTML",
"bytes": "54034"
},
{
"name": "JavaScript",
"bytes": "1463"
},
{
"name": "Python",
"bytes": "58859"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
import crits.stats.handlers as stats
class Command(BaseCommand):
    """
    Script Class.
    """
    help = "Runs mapreduces for CRITs."
    def handle(self, *args, **options):
        """
        Script Execution.
        Runs each CRITs stats mapreduce generator once.
        """
        stats.generate_yara_hits()
        stats.generate_sources()
        # generate_filetypes() was previously invoked twice in a row; the
        # duplicate consecutive call was removed as redundant.
        stats.generate_filetypes()
        stats.generate_campaign_stats()
        stats.generate_counts()
        stats.target_user_stats()
        stats.campaign_date_stats()
|
{
"content_hash": "ee489c5dc7013bca6c6bf1ef222b6a60",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 51,
"avg_line_length": 24.565217391304348,
"alnum_prop": 0.6141592920353982,
"repo_name": "DukeOfHazard/crits",
"id": "0bc0eaaf09e0f25359280a55f09f3b4f8555de30",
"size": "565",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "crits/core/management/commands/mapreduces.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8694"
},
{
"name": "CSS",
"bytes": "391710"
},
{
"name": "HTML",
"bytes": "456073"
},
{
"name": "JavaScript",
"bytes": "3486649"
},
{
"name": "Python",
"bytes": "1863760"
},
{
"name": "SaltStack",
"bytes": "2981"
},
{
"name": "Shell",
"bytes": "10871"
}
],
"symlink_target": ""
}
|
import csv
from eva_cttv_pipeline.trait_mapping.trait import Trait
def output_trait_mapping(trait: Trait, mapping_writer: csv.writer):
    """
    Write any finished ontology mappings for a trait to a csv file writer.
    :param trait: A trait with finished ontology mappings in finished_mapping_set
    :param mapping_writer: A csv.writer to write the finished mappings
    """
    # One row per finished mapping: trait name, ontology URI, ontology label.
    for entry in trait.finished_mapping_set:
        mapping_writer.writerow([trait.name, entry.uri, entry.label])
def get_mappings_for_curation(result_list) -> list:
    """Sorted in reverse so the highest ranked oxo mappings are shown first"""
    # Keep mappings that are either outside EFO, or in EFO and current.
    candidates = [
        mapping
        for result in result_list
        for mapping in result.mapping_list
        if (mapping.in_efo and mapping.is_current) or (not mapping.in_efo)
    ]
    candidates.sort(reverse=True)
    return candidates
def output_for_curation(trait: Trait, curation_writer: csv.writer):
    """
    Write any non-finished Zooma or OxO mappings of a trait to a file for manual curation.
    Also outputs traits without any ontology mappings.
    :param trait: A Trait with no finished ontology mappings in finished_mapping_set
    :param curation_writer: A csv.writer to write non-finished ontology mappings for manual curation
    """
    # Traits which are associated with NT expansion variants should be prioritised and curated even
    # if the number of records they are associated with is low. This is added to the "Notes" column.
    notes = 'NT expansion' if trait.associated_with_nt_expansion else ''
    row = [trait.name, trait.frequency, notes]
    # One pipe-joined cell per candidate Zooma mapping.
    for zooma_mapping in get_mappings_for_curation(trait.zooma_result_list):
        status = 'EFO_CURRENT' if zooma_mapping.in_efo else 'NOT_CONTAINED'
        row.append("|".join([zooma_mapping.uri, zooma_mapping.ontology_label,
                             str(zooma_mapping.confidence), zooma_mapping.source, status]))
    # One pipe-joined cell per candidate OxO mapping.
    for oxo_mapping in get_mappings_for_curation(trait.oxo_result_list):
        status = 'EFO_CURRENT' if oxo_mapping.in_efo else 'NOT_CONTAINED'
        row.append("|".join([str(oxo_mapping.uri), oxo_mapping.ontology_label,
                             str(oxo_mapping.distance), oxo_mapping.query_id, status]))
    curation_writer.writerow(row)
def output_trait(trait: Trait, mapping_writer: csv.writer, curation_writer: csv.writer):
    """
    Output finished ontology mappings of a trait, or non-finished mappings (if any) for curation.
    :param trait: A trait which has been used to query Zooma and possibly OxO.
    :param mapping_writer: A csv.writer to write the finished mappings
    :param curation_writer: A csv.writer to write non-finished ontology mappings for manual curation
    """
    if not trait.is_finished:
        output_for_curation(trait, curation_writer)
        return
    output_trait_mapping(trait, mapping_writer)
|
{
"content_hash": "fad11b42d1ee0371c2506cfcae427d39",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 116,
"avg_line_length": 44.20289855072464,
"alnum_prop": 0.7127868852459016,
"repo_name": "EBIvariation/eva-cttv-pipeline",
"id": "0c3886058285c198ddd0997db40187b6731e7b79",
"size": "3050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eva_cttv_pipeline/trait_mapping/output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "223694"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.