gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""Compatibility module."""
from __future__ import unicode_literals
import sys
import os
import stat
import re
import unicodedata
from functools import wraps
import warnings
from typing import Any, Callable, Tuple, AnyStr, Match, Pattern, Optional, cast
# Version gates for features introduced in Python 3.7 / 3.10.
PY37 = (3, 7) <= sys.version_info
PY310 = (3, 10) <= sys.version_info

# Pattern-type identifiers: `str` patterns vs `bytes` patterns.
UNICODE = 0
BYTES = 1

# True when `os.path.normcase` distinguishes case, i.e. the file system
# convention on this platform is case sensitive.
CASE_FS = os.path.normcase('A') != os.path.normcase('a')

# Normalization pattern for `str` patterns.  Capture groups:
#   1: a slash or an escaped slash (`/` or `\/`)
#   2: a simple one-character escape (`\a`, `\b`, `\f`, `\n`, `\r`, `\t`, `\v`, `\\`)
#   3: a `\U`/`\u`/`\x` escape or an octal escape (octal digits land in group 4)
#   5: a named Unicode escape (`\N{...}`)
#   6: any other escaped character
#   7: an incomplete `\N`/`\U`/`\u`/`\x` escape (treated as an error downstream)
RE_NORM = re.compile(
    r'''(?x)
    (/|\\/)|
    (\\[abfnrtv\\])|
    (\\(?:U[\da-fA-F]{8}|u[\da-fA-F]{4}|x[\da-fA-F]{2}|([0-7]{1,3})))|
    (\\N\{[^}]*?\})|
    (\\[^NUux]) |
    (\\[NUux])
    '''
)

# Bytes analogue of `RE_NORM`: no `\u`/`\U`/`\N` forms, so the "other escape"
# and "incomplete escape" groups shift to 5 and 6 respectively.
RE_BNORM = re.compile(
    br'''(?x)
    (/|\\/)|
    (\\[abfnrtv\\])|
    (\\(?:x[\da-fA-F]{2}|([0-7]{1,3})))|
    (\\[^x]) |
    (\\[x])
    '''
)

# Escape sequence -> literal character, for both str and bytes patterns.
# Note `\\` maps to a *two-character* backslash escape, not a single backslash.
BACK_SLASH_TRANSLATION = {
    r"\a": '\a',
    r"\b": '\b',
    r"\f": '\f',
    r"\r": '\r',
    r"\t": '\t',
    r"\n": '\n',
    r"\v": '\v',
    r"\\": r'\\',
    br"\a": b'\a',
    br"\b": b'\b',
    br"\f": b'\f',
    br"\r": b'\r',
    br"\t": b'\t',
    br"\n": b'\n',
    br"\v": b'\v',
    br"\\": br'\\'
}

# Resolve the platform name once at import time.
if sys.platform.startswith('win'):
    _PLATFORM = "windows"
elif sys.platform == "darwin":  # pragma: no cover
    _PLATFORM = "osx"
else:
    _PLATFORM = "linux"
def platform() -> str:
    """Return the platform identifier resolved at import time: ``"windows"``, ``"osx"``, or ``"linux"``."""
    return _PLATFORM
def is_case_sensitive() -> bool:
    """Return `True` if the file system convention is case sensitive (see `CASE_FS`)."""
    return CASE_FS
def norm_pattern(pattern: AnyStr, normalize: Optional[bool], is_raw_chars: bool, ignore_escape: bool = False) -> AnyStr:
    r"""
    Normalize pattern.

    - For windows systems we want to normalize slashes to \.
    - If raw string chars is enabled, we want to also convert
      encoded string chars to literal characters.
    - If `normalize` is enabled, take care to convert \/ to \\\\.

    Raises `SyntaxError` when raw character translation meets an incomplete
    `\N`/`\U`/`\u`/`\x` escape.
    """
    if isinstance(pattern, bytes):
        is_bytes = True
        slash = b'\\'
        multi_slash = slash * 4
        pat = RE_BNORM
    else:
        is_bytes = False
        slash = '\\'
        multi_slash = slash * 4
        pat = RE_NORM

    # Nothing requested: pass the pattern through untouched.
    if not normalize and not is_raw_chars and not ignore_escape:
        return pattern

    def norm(m: Match[AnyStr]) -> AnyStr:
        """Normalize the pattern."""
        # Group numbers below follow RE_NORM / RE_BNORM; the bytes pattern has
        # no \u/\U/\N forms, so its trailing groups are shifted (5/6 vs 6/7).
        if m.group(1):
            # A slash or escaped slash; `\/` becomes four backslashes when
            # normalizing (per the docstring contract above).
            char = m.group(1)
            if normalize and len(char) > 1:
                char = multi_slash
        elif m.group(2):
            # Simple one-character escape: translate to the literal char only
            # when raw-character mode is on.
            char = cast(AnyStr, BACK_SLASH_TRANSLATION[m.group(2)] if is_raw_chars else m.group(2))
        elif is_raw_chars and m.group(4):
            # Octal escape: low 8 bits for bytes, full code point for str.
            char = cast(AnyStr, bytes([int(m.group(4), 8) & 0xFF]) if is_bytes else chr(int(m.group(4), 8)))
        elif is_raw_chars and m.group(3):
            # Hex (`\xhh`) or Unicode (`\uhhhh` / `\Uhhhhhhhh`) escape.
            char = cast(AnyStr, bytes([int(m.group(3)[2:], 16)]) if is_bytes else chr(int(m.group(3)[2:], 16)))
        elif is_raw_chars and not is_bytes and m.group(5):
            # Named Unicode escape: `\N{NAME}` -> the named character.
            char = unicodedata.lookup(m.group(5)[3:-1])  # type: ignore[assignment]
        elif not is_raw_chars or m.group(5 if is_bytes else 6):
            # Any other escaped character passes through unchanged; re-escape
            # the backslash itself when escapes are being ignored.
            char = m.group(0)
            if ignore_escape:
                char = slash + char
        else:
            # Incomplete `\N`/`\U`/`\u`/`\x` escape while raw chars requested.
            value = m.group(6) if is_bytes else m.group(7)
            pos = m.start(6) if is_bytes else m.start(7)
            raise SyntaxError("Could not convert character value {!r} at position {:d}".format(value, pos))
        return char

    return pat.sub(norm, pattern)
class StringIter:
    """Character-by-character iterator over a string with regex matching and seeking support."""

    def __init__(self, string: str) -> None:
        """Store the target string and start at position zero."""

        self._string = string
        self._index = 0

    def __iter__(self) -> "StringIter":
        """Return self; the object is its own iterator."""

        return self

    def __next__(self) -> str:
        """Delegate to `iternext` for Python 3 iteration."""

        return self.iternext()

    def match(self, pattern: Pattern[str]) -> Optional[Match[str]]:
        """Try `pattern` at the current position; consume the match on success."""

        m = pattern.match(self._string, self._index)
        if m is not None:
            self._index = m.end()
        return m

    @property
    def index(self) -> int:
        """Current position within the string."""

        return self._index

    def previous(self) -> str:  # pragma: no cover
        """Return the character immediately before the current position."""

        return self._string[self._index - 1]

    def advance(self, count: int) -> None:  # pragma: no cover
        """Move the position forward by `count` characters."""

        self._index += count

    def rewind(self, count: int) -> None:
        """Move the position back by `count` characters."""

        if count > self._index:  # pragma: no cover
            raise ValueError("Can't rewind past beginning!")
        self._index -= count

    def iternext(self) -> str:
        """Return the character at the current position and step past it."""

        if self._index >= len(self._string):
            raise StopIteration
        char = self._string[self._index]
        self._index += 1
        return char
class Immutable:
    """Base class whose instances reject attribute assignment after construction."""

    __slots__: Tuple[Any, ...] = tuple()

    def __init__(self, **kwargs: Any) -> None:
        """Bind the initial attributes, bypassing the immutability guard below."""
        for key, value in kwargs.items():
            object.__setattr__(self, key, value)

    def __setattr__(self, name: str, value: Any) -> None:  # pragma: no cover
        """Refuse every attribute assignment."""
        raise AttributeError('Class is immutable!')
def is_hidden(path: AnyStr) -> bool:
    """Return `True` if the file at `path` counts as hidden on this platform."""

    name = os.path.basename(path)
    # A leading dot marks a file hidden on every system.
    if name[:1] in ('.', b'.'):
        return True
    if sys.platform == 'win32':
        # Windows: test the `FILE_ATTRIBUTE_HIDDEN` bit reported by `lstat`.
        FILE_ATTRIBUTE_HIDDEN = 0x2
        return bool(os.lstat(path).st_file_attributes & FILE_ATTRIBUTE_HIDDEN)
    if sys.platform == "darwin":  # pragma: no cover
        # macOS: test the `UF_HIDDEN` flag.
        return bool(os.lstat(path).st_flags & stat.UF_HIDDEN)
    return False
def deprecated(message: str, stacklevel: int = 2) -> Callable[..., Any]:  # pragma: no cover
    """
    Raise a `DeprecationWarning` when wrapped function/method is called.

    Usage:

        @deprecated("This method will be removed in version X; use Y instead.")
        def some_method():
            pass

    Args:
        message: Text appended to the warning, typically naming the replacement.
        stacklevel: Passed to `warnings.warn` so the warning points at the
            caller rather than at this wrapper (default 2 = direct caller).

    Returns:
        A decorator that wraps a callable so each call emits the warning and
        then delegates to the original callable unchanged.
    """
    def _wrapper(func: Callable[..., Any]) -> Callable[..., Any]:
        @wraps(func)
        def _deprecated_func(*args: Any, **kwargs: Any) -> Any:
            warnings.warn(
                f"'{func.__name__}' is deprecated. {message}",
                category=DeprecationWarning,
                stacklevel=stacklevel
            )
            return func(*args, **kwargs)
        return _deprecated_func
    return _wrapper
def warn_deprecated(message: str, stacklevel: int = 2) -> None:  # pragma: no cover
    """Emit `message` as a `DeprecationWarning` attributed to the caller's frame."""

    warnings.warn(message, category=DeprecationWarning, stacklevel=stacklevel)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations to emit summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import getpass
import os
import re
import time
import six
from tensorflow.contrib.summary import gen_summary_ops
from tensorflow.core.framework import graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.util import tf_contextlib
# Name for a collection which is expected to have at most a single boolean
# Tensor. If this tensor is True the summary ops will record summaries.
_SHOULD_RECORD_SUMMARIES_NAME = "ShouldRecordSummaries"

# Collection that accumulates the init ops of summary writer resources,
# later drained by `summary_writer_initializer_op`.
_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2"

# Validation patterns for database-writer metadata: names may not contain
# ASCII control characters or angle brackets; user names must additionally
# look like DNS labels (case-insensitive, max 31 chars).
_EXPERIMENT_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,256}$")
_RUN_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,512}$")
_USER_NAME_PATTERNS = re.compile(r"^[a-z]([-a-z0-9]{0,29}[a-z0-9])?$", re.I)
def should_record_summaries():
  """Returns boolean Tensor which is true if summaries should be recorded."""
  collection = ops.get_collection(_SHOULD_RECORD_SUMMARIES_NAME)
  # An empty collection means no recording context is active.
  if not collection:
    return False
  if len(collection) != 1:
    raise ValueError(
        "More than one tensor specified for whether summaries "
        "should be recorded: %s" % collection)
  return collection[0]
# TODO(apassos) consider how to handle local step here.
@tf_contextlib.contextmanager
def record_summaries_every_n_global_steps(n, global_step=None):
  """Sets the should_record_summaries Tensor to true if global_step % n == 0."""
  if global_step is None:
    global_step = training_util.get_global_step()
  collection_ref = ops.get_collection_ref(_SHOULD_RECORD_SUMMARIES_NAME)
  # Save the previous condition so nested recording contexts restore it.
  old = collection_ref[:]
  with ops.device("cpu:0"):
    collection_ref[:] = [math_ops.equal(global_step % n, 0)]
  yield
  # NOTE(review): restoration is skipped if the body raises; a try/finally
  # would be safer -- confirm whether that is intentional.
  collection_ref[:] = old
@tf_contextlib.contextmanager
def always_record_summaries():
  """Sets the should_record_summaries Tensor to always true."""
  collection_ref = ops.get_collection_ref(_SHOULD_RECORD_SUMMARIES_NAME)
  # Save the previous condition so nested recording contexts restore it.
  old = collection_ref[:]
  collection_ref[:] = [True]
  yield
  collection_ref[:] = old
@tf_contextlib.contextmanager
def never_record_summaries():
  """Sets the should_record_summaries Tensor to always false."""
  collection_ref = ops.get_collection_ref(_SHOULD_RECORD_SUMMARIES_NAME)
  # Save the previous condition so nested recording contexts restore it.
  old = collection_ref[:]
  collection_ref[:] = [False]
  yield
  collection_ref[:] = old
class SummaryWriter(object):
  """Encapsulates a stateful summary writer resource.

  See also:
  - @{tf.contrib.summary.create_file_writer}
  - @{tf.contrib.summary.create_db_writer}
  """

  def __init__(self, resource):
    """Wrap `resource`; `None` produces a do-nothing mock writer."""
    self._resource = resource
    if context.in_eager_mode():
      # In eager mode, tie the lifetime of the underlying writer resource to
      # this Python object so garbage collection frees it.
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._resource, handle_device="cpu:0")

  def set_as_default(self):
    """Enables this summary writer for the current thread."""
    context.context().summary_writer_resource = self._resource

  @tf_contextlib.contextmanager
  def as_default(self):
    """Enables summary writing within a `with` block."""
    if self._resource is None:
      # Mock writer (created with logdir=None): nothing to install or flush.
      yield self
    else:
      old = context.context().summary_writer_resource
      context.context().summary_writer_resource = self._resource
      yield self
      # Flushes the summary writer in eager mode or in graph functions, but not
      # in legacy graph mode (you're on your own there).
      with ops.device("cpu:0"):
        gen_summary_ops.flush_summary_writer(self._resource)
      context.context().summary_writer_resource = old
def initialize(
    graph=None,  # pylint: disable=redefined-outer-name
    session=None):
  """Initializes summary writing for graph execution mode.

  This helper method provides a higher-level alternative to using
  @{tf.contrib.summary.summary_writer_initializer_op} and
  @{tf.contrib.summary.graph}.

  Most users will also want to call @{tf.train.create_global_step}
  which can happen before or after this function is called.

  Args:
    graph: A @{tf.Graph} or @{tf.GraphDef} to output to the writer.
      This function will not write the default graph by default. When
      writing to an event log file, the associated step will be zero.
    session: So this method can call @{tf.Session.run}. This defaults
      to @{tf.get_default_session}.

  Raises:
    RuntimeError: If in eager mode, or if the current thread has no
      default @{tf.contrib.summary.SummaryWriter}.
    ValueError: If session wasn't passed and no default session.
  """
  if context.context().summary_writer_resource is None:
    raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
  if session is None:
    session = ops.get_default_session()
    if session is None:
      raise ValueError("session must be passed if no default session exists")
  # Run the queued writer-creation ops before anything tries to write.
  session.run(summary_writer_initializer_op())
  if graph is not None:
    data = _serialize_graph(graph)
    # Feed the serialized graph through a placeholder rather than embedding a
    # potentially large proto as a constant in the graph itself.
    x = array_ops.placeholder(dtypes.string)
    session.run(_graph(x, 0), feed_dict={x: data})
def create_file_writer(logdir,
                       max_queue=None,
                       flush_millis=None,
                       filename_suffix=None,
                       name=None):
  """Creates a summary file writer in the current context.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
      which writes to the directory named by the string. If None, returns
      a mock object which acts like a summary writer but does nothing,
      useful to use as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
      flush once the queue gets bigger than this.
    flush_millis: the largest interval between flushes.
    filename_suffix: optional suffix for the event file name.
    name: Shared name for this SummaryWriter resource stored to default
      Graph.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
  if logdir is None:
    # Mock writer: all of SummaryWriter's methods become no-ops.
    return SummaryWriter(None)
  with ops.device("cpu:0"):
    # Defaults: queue up to 10 summaries, flush at least every two minutes.
    if max_queue is None:
      max_queue = constant_op.constant(10)
    if flush_millis is None:
      flush_millis = constant_op.constant(2 * 60 * 1000)
    if filename_suffix is None:
      filename_suffix = constant_op.constant("")
    return _make_summary_writer(
        name,
        gen_summary_ops.create_summary_file_writer,
        logdir=logdir,
        max_queue=max_queue,
        flush_millis=flush_millis,
        filename_suffix=filename_suffix)
def create_db_writer(db_uri,
                     experiment_name=None,
                     run_name=None,
                     user_name=None,
                     name=None):
  """Creates a summary database writer in the current context.

  This can be used to write tensors from the execution graph directly
  to a database. Only SQLite is supported right now. This function
  will create the schema if it doesn't exist. Entries in the Users,
  Experiments, and Runs tables will be created automatically if they
  don't already exist.

  Args:
    db_uri: For example "file:/tmp/foo.sqlite".
    experiment_name: Defaults to YYYY-MM-DD in local time if None.
      Empty string means the Run will not be associated with an
      Experiment. Can't contain ASCII control characters or <>. Case
      sensitive.
    run_name: Defaults to HH:MM:SS in local time if None. Empty string
      means a Tag will not be associated with any Run. Can't contain
      ASCII control characters or <>. Case sensitive.
    user_name: Defaults to system username if None. Empty means the
      Experiment will not be associated with a User. Must be valid as
      both a DNS label and Linux username.
    name: Shared name for this SummaryWriter resource stored to default
      @{tf.Graph}.

  Returns:
    A @{tf.contrib.summary.SummaryWriter} instance.
  """
  with ops.device("cpu:0"):
    if experiment_name is None:
      experiment_name = time.strftime("%Y-%m-%d", time.localtime(time.time()))
    if run_name is None:
      run_name = time.strftime("%H:%M:%S", time.localtime(time.time()))
    if user_name is None:
      user_name = getpass.getuser()
    # Validate each name against its pattern and convert to string tensors.
    experiment_name = _cleanse_string(
        "experiment_name", _EXPERIMENT_NAME_PATTERNS, experiment_name)
    run_name = _cleanse_string("run_name", _RUN_NAME_PATTERNS, run_name)
    user_name = _cleanse_string("user_name", _USER_NAME_PATTERNS, user_name)
    return _make_summary_writer(
        name,
        gen_summary_ops.create_summary_db_writer,
        db_uri=db_uri,
        experiment_name=experiment_name,
        run_name=run_name,
        user_name=user_name)
def _make_summary_writer(name, factory, **kwargs):
  """Create a writer resource via `factory` and register its init op."""
  resource = gen_summary_ops.summary_writer(shared_name=name)
  # TODO(apassos): Consider doing this instead.
  # node = factory(resource, **kwargs)
  # if not context.in_eager_mode():
  #   ops.get_default_session().run(node)
  # The factory's init op is stashed in a collection for
  # `summary_writer_initializer_op` to run later in graph mode.
  ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME,
                        factory(resource, **kwargs))
  return SummaryWriter(resource)
def _cleanse_string(name, pattern, value):
  """Validate string `value` against `pattern`, then return it as a string tensor."""
  is_plain_string = isinstance(value, six.string_types)
  if is_plain_string and pattern.search(value) is None:
    raise ValueError("%s (%s) must match %s" % (name, value, pattern.pattern))
  return ops.convert_to_tensor(value, dtypes.string)
def _nothing():
  """Convenient else branch for when summaries do not record."""
  # Mirrors `record()`'s boolean return so `smart_cond` branches match.
  return constant_op.constant(False)
def all_summary_ops():
  """Graph-mode only. Returns all summary ops.

  Please note this excludes @{tf.contrib.summary.graph} ops.

  Returns:
    The summary ops.

  Raises:
    RuntimeError: If in Eager mode.
  """
  if context.in_eager_mode():
    raise RuntimeError(
        "tf.contrib.summary.all_summary_ops is only supported in graph mode.")
  # Summary ops are registered on this private graph key by
  # `summary_writer_function` below.
  return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
def summary_writer_initializer_op():
  """Graph-mode only. Returns the list of ops to create all summary writers.

  Returns:
    The initializer ops.

  Raises:
    RuntimeError: If in Eager mode.
  """
  if context.in_eager_mode():
    raise RuntimeError(
        "tf.contrib.summary.summary_writer_initializer_op is only "
        "supported in graph mode.")
  # Populated by `_make_summary_writer` each time a writer is created.
  return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  def record():
    """Build the write op inside a summary name scope; returns a true const."""
    with summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)

  # No writer installed for this thread: writing is a no-op.
  if context.context().summary_writer_resource is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    # Only build/run the write op when recording is enabled.
    op = utils.smart_cond(
        should_record_summaries(), record, _nothing, name="")
    # Register on the private summary key so `all_summary_ops` can find it.
    ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
  return op
def generic(name, tensor, metadata=None, family=None, step=None):
  """Writes a tensor summary if possible."""

  def function(tag, scope):
    """Build the raw `write_summary` op for `tensor`."""
    if metadata is None:
      serialized_metadata = constant_op.constant("")
    elif hasattr(metadata, "SerializeToString"):
      # A proto (e.g. SummaryMetadata): serialize it here.
      serialized_metadata = constant_op.constant(metadata.SerializeToString())
    else:
      # Assume the caller already passed serialized metadata.
      serialized_metadata = metadata
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_summary(
        context.context().summary_writer_resource,
        _choose_step(step),
        array_ops.identity(tensor),
        tag,
        serialized_metadata,
        name=scope)

  return summary_writer_function(name, tensor, function, family=family)
def scalar(name, tensor, family=None, step=None):
  """Writes a scalar summary if possible.

  Unlike @{tf.contrib.summary.generic} this op may change the dtype
  depending on the writer, for both practical and efficiency concerns.

  Args:
    name: An arbitrary name for this summary.
    tensor: A @{tf.Tensor} Must be one of the following types:
      `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
      `int8`, `uint16`, `half`, `uint32`, `uint64`.
    family: Optional, the summary's family.
    step: The `int64` monotonic step variable, which defaults
      to @{tf.train.get_global_step}.

  Returns:
    The created @{tf.Operation} or a @{tf.no_op} if summary writing has
    not been enabled for this context.
  """

  def function(tag, scope):
    """Build the `write_scalar_summary` op for `tensor`."""
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_scalar_summary(
        context.context().summary_writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, function, family=family)
def histogram(name, tensor, family=None, step=None):
  """Writes a histogram summary if possible."""

  def function(tag, scope):
    """Build the `write_histogram_summary` op for `tensor`."""
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_histogram_summary(
        context.context().summary_writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, function, family=family)
def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):
  """Writes an image summary if possible."""

  def function(tag, scope):
    """Build the `write_image_summary` op for `tensor`."""
    # Default bad-pixel color is opaque red (RGBA uint8).
    bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
                  if bad_color is None else bad_color)
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_image_summary(
        context.context().summary_writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        bad_color_,
        max_images,
        name=scope)

  return summary_writer_function(name, tensor, function, family=family)
def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):
  """Writes an audio summary if possible."""

  def function(tag, scope):
    """Build the `write_audio_summary` op for `tensor`."""
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_audio_summary(
        context.context().summary_writer_resource,
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        sample_rate=sample_rate,
        max_outputs=max_outputs,
        name=scope)

  return summary_writer_function(name, tensor, function, family=family)
def graph(param, step=None, name=None):
  """Writes a TensorFlow graph to the summary interface.

  The graph summary is, strictly speaking, not a summary. Conditions
  like @{tf.contrib.summary.never_record_summaries} do not apply. Only
  a single graph can be associated with a particular run. If multiple
  graphs are written, then only the last one will be considered by
  TensorBoard.

  When not using eager execution mode, the user should consider passing
  the `graph` parameter to @{tf.contrib.summary.initialize} instead of
  calling this function. Otherwise special care needs to be taken when
  using the graph to record the graph.

  Args:
    param: A @{tf.Tensor} containing a serialized graph proto. When
      eager execution is enabled, this function will automatically
      coerce @{tf.Graph}, @{tf.GraphDef}, and string types.
    step: The global step variable. This doesn't have useful semantics
      for graph summaries, but is used anyway, due to the structure of
      event log files. This defaults to the global step.
    name: A name for the operation (optional).

  Returns:
    The created @{tf.Operation} or a @{tf.no_op} if summary writing has
    not been enabled for this context.

  Raises:
    TypeError: If `param` isn't already a @{tf.Tensor} in graph mode.
  """
  if not context.in_eager_mode() and not isinstance(param, ops.Tensor):
    raise TypeError("graph() needs a tf.Tensor (e.g. tf.placeholder) in graph "
                    "mode, but was: %s" % type(param))
  writer = context.context().summary_writer_resource
  if writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):
      # Eager path: serialize a Graph/GraphDef into a string tensor.
      tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string)
    else:
      tensor = array_ops.identity(param)
    return gen_summary_ops.write_graph_summary(
        writer, _choose_step(step), tensor, name=name)


_graph = graph  # for functions with a graph parameter
def import_event(tensor, name=None):
  """Writes a @{tf.Event} binary proto.

  When using create_db_writer(), this can be used alongside
  @{tf.TFRecordReader} to load event logs into the database. Please
  note that this is lower level than the other summary functions and
  will ignore any conditions set by methods like
  @{tf.contrib.summary.should_record_summaries}.

  Args:
    tensor: A @{tf.Tensor} of type `string` containing a serialized
      @{tf.Event} proto.
    name: A name for the operation (optional).

  Returns:
    The created @{tf.Operation}.
  """
  # Writes unconditionally through the thread-default writer resource.
  return gen_summary_ops.import_event(
      context.context().summary_writer_resource, tensor, name=name)
def flush(writer=None, name=None):
  """Forces summary writer to send any buffered data to storage.

  This operation blocks until that finishes.

  Args:
    writer: The @{tf.contrib.summary.SummaryWriter} resource to flush.
      The thread default will be used if this parameter is None.
      Otherwise a @{tf.no_op} is returned.
    name: A name for the operation (optional).

  Returns:
    The created @{tf.Operation}.
  """
  target = context.context().summary_writer_resource if writer is None else writer
  if target is None:
    return control_flow_ops.no_op()
  return gen_summary_ops.flush_summary_writer(target, name=name)
def eval_dir(model_dir, name=None):
  """Construct a logdir for an eval summary writer."""
  subdir = "eval_" + name if name else "eval"
  return os.path.join(model_dir, subdir)
def create_summary_file_writer(*args, **kwargs):
  """Please use @{tf.contrib.summary.create_file_writer}."""
  # Deprecated alias kept for backward compatibility; warns then delegates.
  logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
                  "to create_file_writer")
  return create_file_writer(*args, **kwargs)
def _serialize_graph(arbitrary_graph):
  """Serialize a `tf.Graph` or a graph proto into a byte string."""
  if isinstance(arbitrary_graph, ops.Graph):
    # A live Graph is first converted to a GraphDef; `add_shapes` annotates
    # nodes with their inferred output shapes.
    return arbitrary_graph.as_graph_def(add_shapes=True).SerializeToString()
  else:
    return arbitrary_graph.SerializeToString()
def _choose_step(step):
  """Return `step` as an int64 tensor, defaulting to the global step when None."""
  if step is None:
    return training_util.get_global_step()
  if isinstance(step, ops.Tensor):
    return step
  return ops.convert_to_tensor(step, dtypes.int64)
|
|
#!/usr/bin/env python
import sys, os, itertools, shutil, getopt, re
import conf
import pdb, traceback
import metacomm.combinatorics.all_pairs2
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
def do_Selfcom(self_combin_file,out_file):
    """Append cleaned combination lines from self_combin_file to out_file.

    Each line has "null" placeholders removed and the doubled commas their
    removal leaves behind collapsed.  NOTE(review): the trailing-comma strip
    runs before the newline is removed, so it only fires on a final line
    lacking a newline -- confirm whether that is intentional.
    """
    try:
        file = open(self_combin_file)
        allpairs_in = open(out_file,'a+')
        while 1:
            line = file.readline().replace("null","")
            line = line.replace(",,",",")
            if (line[-1:]==","):
                line = line[:-1]
            # An empty read marks end of file.
            if not line:
                break
            allpairs_in.writelines(line + "\n")
        # NOTE(review): both handles leak if an exception fires mid-loop.
        file.close()
        allpairs_in.close()
        return
    except Exception,e:
        print Exception,":",e
def fileline_count(fp):
    """Return the number of lines in the file at path fp.

    NOTE(review): the file object is never closed explicitly; this relies
    on garbage collection.
    """
    return len(open(fp).readlines())
def del_Seed(in_file):
    """Expand the seed file into pairwise test combinations.

    Splits in_file into per-group "<name>_input.txt" files under
    conf.path/self (group name = the part of the field before the first
    "-"), runs the all-pairs generator on each multi-line group, merges
    every group's output into conf.selfcomb_file via do_Selfcom, and
    finally calls gen_selfcomb_File to produce the combined output file.
    """
    try:
        caseline = ""
        old_list = []
        format_list =[]
        de=""
        row = 0
        file = open(in_file)
        items = []
        self_file = []
        s_name = p_name = ""
        # Start from a clean "self" working directory.
        if (os.path.isdir("self")):
            do_Clear(conf.path +"/self")
        os.mkdir(conf.path + "/self")
        while 1:
            p_name = s_name
            line = file.readline()
            if not line:
                break
            line = line.strip('\n\r')
            items = line.split(":")
            # Group name is the field text before the first "-".
            s_name = items[0].split("-")[0]
            # NOTE(review): both branches below are identical; presumably a
            # leftover from an earlier distinction -- confirm.
            if ((p_name!=s_name) and (p_name!="")):
                fp=open(conf.path + "/self/" + s_name + "_input.txt",'a+')
                fp.writelines(line + "\n")
            else:
                fp= open(conf.path + "/self/" + s_name + "_input.txt",'a+')
                fp.writelines(line + "\n")
            # Record each new group name once, in order of appearance.
            if (s_name!=p_name):
                self_file.append(s_name)
            fp.close()
        file.close()
        # Rebuild the merged combination file from scratch.
        if (os.path.isfile(conf.selfcomb_file)):
            os.remove(conf.selfcomb_file)
        for i in range (0,len(self_file)):
            line_count = fileline_count(conf.path + "/self/" + self_file[i] + "_input.txt")
            if (line_count >= 2):
                # Multi-line group: collect each line's value list, then run
                # the all-pairs generator across them.
                lists = [[] for m in range(line_count)]
                # NOTE(review): opened 'a+' but only read -- the initial read
                # position for append mode is platform dependent; confirm.
                open_input_file = open(conf.path + "/self/" + self_file[i] + "_input.txt",'a+')
                while 1:
                    line = open_input_file.readline()
                    if not line:
                        break
                    line = line.strip('\n\r')
                    items = line.split(":")
                    get_item = items[1].split(",")
                    get_item1 = get_item
                    # Remember value lists containing "null" placeholders so
                    # their values can be re-attached to each generated case.
                    if (str(get_item).find("null"))>1:
                        for element in range(0,len(get_item)):
                            if get_item[element]=="null":
                                old_list = old_list + get_item
                    for g in get_item:
                        lists[row].append(g)
                    row = row + 1
                input_pair = all_pairs( lists )
                open_input_file.close()
                output_pair = open(conf.path + "/self/" + self_file[i] + "_output.txt",'a+')
                for e, v in enumerate(input_pair):
                    for c in range(0,len(v)):
                        caseline = caseline + v[c]
                        caseline = caseline.replace("null","") + ","
                    get_output_item = caseline[:-1].split(",")
                    get_output_item = old_list + get_output_item
                    # De-duplicate while preserving order, then re-join.
                    format_list = ','.join(dele_list(get_output_item))
                    #print "get-----",get_output_item
                    output_pair.writelines(self_file[i] + ":" + format_list)
                output_pair.close()
            else:
                # Single-line group: copy the input straight to the output.
                open_input_file = open(conf.path + "/self/" + self_file[i] + "_input.txt",'r')
                output_pair = open(conf.path + "/self/" + self_file[i] + "_output.txt",'a+')
                while 1:
                    line = open_input_file.readline()
                    if not line:
                        break
                    line = line.strip('\n\r')
                    output_pair.writelines(line)
                output_pair.close()
                open_input_file .close()
            #1*********input_seed -> selfcomb.txt
            # if more self combination, each self generate itself output file,finally all self_input generate one selfcomb.txt
            do_Selfcom(conf.path + "/self/" + self_file[i] + "_output.txt",conf.selfcomb_file)
            # Reset the per-group accumulators before the next group.
            row = 0
            caseline = ""
            format_list=[]
            get_output_item = []
            old_list = []
        #2*********selfcomb -> output file by allpairs
        gen_selfcomb_File(conf.selfcomb_file, in_file)
    except Exception,e:
        print Exception,":",e
        print traceback.format_exc()
def dele_list(old_list):
    """Return a copy of old_list with duplicates removed, preserving order."""
    try:
        newList = []
        for x in old_list:
            if x not in newList :
                newList.append(x)
        return newList
    except Exception,e:
        print Exception,":",e
        print traceback.format_exc()
def gen_selfcomb_File(comb_file,in_file):
    """Generate the final tab-separated output from the merged combination file.

    Writes a header row of group names followed by one all-pairs case per
    line to conf.output_file.  "null" placeholders are dropped and the
    literal token "comma" is restored to an actual ",".
    """
    try:
        open_output_file= open(conf.output_file,'a+')
        caseline = ""
        get_items = ""
        get_case = ""
        get_out_put = ""
        row = 0
        line_count = fileline_count(comb_file)
        if (line_count >= 1):
            lists = [[] for m in range(line_count)]
            # First pass: build the tab-separated header of group names.
            open_input_file= open(comb_file)
            while 1:
                line = open_input_file.readline()
                if not line:
                    break
                line = line.strip('\n\r')
                items = line.split(":")
                get_items = get_items + items[0].split("-")[0] + "\t"
            open_output_file.writelines(get_items.rstrip("\t") + "\n")
            open_input_file.close()
            # Second pass: collect each line's value list for all-pairs.
            open_input_file= open(comb_file)
            for i in range(0,len(lists)):
                line = open_input_file.readline()
                if not line:
                    break
                line = line.strip('\n\r')
                items = line.split(":")#items[0]=field;#item[1]=value
                # Take everything after the first ":" so values may contain ":".
                value = line[len(items[0])+1:]
                get_item= value.split(",")
                for g in get_item:
                    lists[row].append(g)
                row = row + 1
            if len(lists)>1:
                input_pair = all_pairs( lists )
                for e, v in enumerate(input_pair):
                    for c in range(0,len(v)):
                        get_case = get_case + v[c] + "\t"
                    # Drop placeholders and restore literal commas.
                    get_out_put = get_case.replace("null","").replace("comma",",").strip("\t")
                    open_output_file.writelines(get_out_put + "\n")
                    #print get_case.replace("null","").rstrip("\t")
                    get_case=""
        open_output_file.close()
        return "Generate selfcombination file ------------------------->O.K"
    except:
        print traceback.format_exc()
def do_Clear(sourceDir):
    """Delete sourceDir if it exists, whether it is a directory tree or a file."""
    try:
        if (os.path.exists(sourceDir)):
            if (os.path.isdir(sourceDir)):
                shutil.rmtree(sourceDir)
            else:
                os.remove(sourceDir)
    except IOError,e:
        print Exception,"Clear :"+ sourceDir + " ------------------------->error",e
def main():
    """Entry point: purge previous output, expand the seed file, clean up."""
    try:
        # Remove stale results from the previous run.
        do_Clear("./output/output.txt")
        # Split the seed file and generate the pairwise combinations.
        del_Seed(conf.seed_file)
        # Drop the intermediate per-group working directory.
        do_Clear("./self")
    except Exception,e:
        print Exception,":",e

if __name__=="__main__":
    main()
|
|
#!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for manila management.
"""
from __future__ import print_function
import os
import sys
from manila import i18n
i18n.enable_lazy()
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils
from manila.common import config # Need to register global_opts # noqa
from manila import context
from manila import db
from manila.db import migration
from manila.i18n import _
from manila import utils
from manila import version
CONF = cfg.CONF
# Decorators for actions
def args(*arg_spec, **arg_opts):
    """Decorator attaching an argparse argument spec to a command method.

    Each application prepends one ``(positional_args, keyword_args)`` tuple
    to the function's ``args`` attribute, so stacked decorators end up in
    source order for later registration with argparse.
    """
    def _decorator(func):
        specs = func.__dict__.setdefault('args', [])
        specs.insert(0, (arg_spec, arg_opts))
        return func
    return _decorator
def param2id(object_id):
    """Helper function to convert various id types to internal id.

    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
    """
    if uuidutils.is_uuid_like(object_id):
        return object_id
    if '-' in object_id:
        # FIXME(ja): mapping occurs in nova?
        return None
    return int(object_id)
class ShellCommands(object):
    """Interactive interpreter commands for manila-manage."""
    def bpython(self):
        """Runs a bpython shell.
        Falls back to Ipython/python shell if unavailable
        """
        self.run('bpython')
    def ipython(self):
        """Runs an Ipython shell.
        Falls back to Python shell if unavailable
        """
        self.run('ipython')
    def python(self):
        """Runs a python shell.
        Falls back to Python shell if unavailable
        """
        self.run('python')
    @args('--shell', dest="shell",
          metavar='<bpython|ipython|python>',
          help='Python shell')
    def run(self, shell=None):
        """Runs a Python interactive interpreter.

        Tries the requested shell and falls back along the chain
        bpython -> ipython -> plain python; each failed import moves
        `shell` on to the next candidate.
        """
        if not shell:
            shell = 'bpython'
        if shell == 'bpython':
            try:
                import bpython
                bpython.embed()
            except ImportError:
                # bpython is unavailable; fall through to IPython.
                shell = 'ipython'
        if shell == 'ipython':
            try:
                from IPython import embed
                embed()
            except ImportError:
                # Ipython < 0.11
                try:
                    import IPython
                    # Explicitly pass an empty list as arguments, because
                    # otherwise IPython would use sys.argv from this script.
                    shell = IPython.Shell.IPShell(argv=[])
                    shell.mainloop()
                except ImportError:
                    # no IPython module
                    shell = 'python'
        if shell == 'python':
            import code
            try:
                # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try',
                # because we already know 'readline' was imported successfully.
                import rlcompleter  # noqa
                readline.parse_and_bind("tab:complete")
            code.interact()
    @args('--path', required=True, help='Script path')
    def script(self, path):
        """Runs the script from the specified path with flags set properly.
        arguments: path

        NOTE(review): executes arbitrary code from `path` with full
        privileges; intended for operator use only.
        """
        exec(compile(open(path).read(), path, 'exec'), locals(), globals())
class HostCommands(object):
    """List hosts."""
    @args('zone', nargs='?', default=None,
          help='Availability Zone (default: %(default)s)')
    def list(self, zone=None):
        """Print one line per distinct physical host, optionally by zone.

        args: [zone]
        """
        print("%-25s\t%-15s" % (_('host'), _('zone')))
        ctxt = context.get_admin_context()
        services = db.service_get_all(ctxt)
        if zone:
            services = [s for s in services
                        if s['availability_zone']['name'] == zone]
        # Keep only the first service seen per host, preserving order.
        seen_hosts = set()
        for srv in services:
            if srv['host'] not in seen_hosts:
                seen_hosts.add(srv['host'])
                print("%-25s\t%-15s" % (
                    srv['host'], srv['availability_zone']['name']))
class DbCommands(object):
    """Database schema management commands."""
    def __init__(self):
        pass
    @args('version', nargs='?', default=None,
          help='Database version')
    def sync(self, version=None):
        """Upgrade the database schema to the given (or latest) version."""
        return migration.upgrade(version)
    def version(self):
        """Print the current database schema version."""
        print(migration.version())
    # NOTE(imalinovskiy): the initial migration id is hardcoded as the
    # downgrade default because alembic behaves oddly:
    # "downgrade base" means head(162a3e673105) -> base(162a3e673105),
    # i.e. 162a3e673105 -> (empty), which errors, whereas downgrading
    # to 162a3e673105 itself is a harmless no-op.
    @args('version', nargs='?', default='162a3e673105',
          help='Version to downgrade')
    def downgrade(self, version=None):
        """Downgrade the database schema to the given version."""
        return migration.downgrade(version)
    @args('--message', help='Revision message')
    @args('--autogenerate', help='Autogenerate migration from schema')
    def revision(self, message, autogenerate):
        """Create a new migration revision."""
        return migration.revision(message, autogenerate)
    @args('version', nargs='?', default=None,
          help='Version to stamp version table with')
    def stamp(self, version=None):
        """Stamp the version table with the given version."""
        return migration.stamp(version)
class VersionCommands(object):
    """Expose the codebase version string."""
    def list(self):
        """Print the manila version."""
        print(version.version_string())
    def __call__(self):
        # Invoking the category object directly behaves like `list`.
        self.list()
class ConfigCommands(object):
    """Class for exposing the flags defined by flag_file(s)."""
    def list(self):
        """Print every configured option that currently has a value."""
        for key, value in CONF.items():
            if value is None:
                continue
            print('%s = %s' % (key, value))
class GetLogCommands(object):
    """Get logging information."""
    def errors(self):
        """Print every " ERROR " line found in the configured log directory.

        Scans each *.log file in CONF.log_dir (newest lines first) and
        prints matching lines with their 1-based line numbers.
        """
        error_found = 0
        if CONF.log_dir:
            logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
            for file in logs:
                log_file = os.path.join(CONF.log_dir, file)
                # Use a context manager so the handle is always closed;
                # the previous version leaked one file object per log file.
                with open(log_file, "r") as f:
                    lines = [line.strip() for line in f]
                lines.reverse()
                print_name = 0
                for index, line in enumerate(lines):
                    if line.find(" ERROR ") > 0:
                        error_found += 1
                        if print_name == 0:
                            print(log_file + ":-")
                            print_name = 1
                        print("Line %d : %s" % (len(lines) - index, line))
        if error_found == 0:
            print("No errors in logfiles!")
    @args('num_entries', nargs='?', type=int, default=10,
          help='Number of entries to list (default: %(default)d)')
    def syslog(self, num_entries=10):
        """Get <num_entries> of the manila syslog events.

        Reads /var/log/syslog or /var/log/messages (whichever exists)
        and prints the newest matching entries; exits(1) when neither
        system log file is present.
        """
        entries = int(num_entries)
        count = 0
        if os.path.exists('/var/log/syslog'):
            log_file = '/var/log/syslog'
        elif os.path.exists('/var/log/messages'):
            log_file = '/var/log/messages'
        else:
            print("Unable to find system log file!")
            sys.exit(1)
        # Context manager fixes the leaked file handle of the original.
        with open(log_file, "r") as f:
            lines = [line.strip() for line in f]
        lines.reverse()
        print("Last %s manila syslog entries:-" % (entries))
        for line in lines:
            if line.find("manila") > 0:
                count += 1
                print("%s" % (line))
                if count == entries:
                    break
        if count == 0:
            print("No manila entries in syslog!")
class ServiceCommands(object):
    """Methods for managing services."""
    def list(self):
        """Print a table of all manila services with liveness and status."""
        ctxt = context.get_admin_context()
        row = "%-16s %-36s %-16s %-10s %-5s %-10s"
        print(row % (
            _('Binary'),
            _('Host'),
            _('Zone'),
            _('Status'),
            _('State'),
            _('Updated At'))
        )
        for svc in db.service_get_all(ctxt):
            # ":-)" means the service heartbeat is recent, "XXX" means stale.
            state = ":-)" if utils.service_is_up(svc) else "XXX"
            status = 'disabled' if svc['disabled'] else 'enabled'
            print(row % (
                svc['binary'],
                svc['host'].partition('.')[0],
                svc['availability_zone']['name'],
                status,
                state,
                svc['updated_at'],
            ))
# Maps CLI category names to their command classes;
# `manila-manage <category> <action>` dispatches through this table.
CATEGORIES = {
    'config': ConfigCommands,
    'db': DbCommands,
    'host': HostCommands,
    'logs': GetLogCommands,
    'service': ServiceCommands,
    'shell': ShellCommands,
    'version': VersionCommands
}
def methods_of(obj):
    """Get all callable methods of an object that don't start with underscore.

    Returns a list of tuples of the form (method_name, method).
    """
    return [(name, getattr(obj, name)) for name in dir(obj)
            if not name.startswith('_') and callable(getattr(obj, name))]
def add_command_parsers(subparsers):
    """Register one sub-parser per category and one per command method.

    Each action parser gets the @args-declared options plus defaults
    pointing back at the bound method so main() can dispatch.
    """
    for category, command_class in CATEGORIES.items():
        command_object = command_class()
        category_parser = subparsers.add_parser(category)
        category_parser.set_defaults(command_object=command_object)
        action_subparsers = category_parser.add_subparsers(dest='action')
        for action, action_fn in methods_of(command_object):
            action_parser = action_subparsers.add_parser(action)
            for arg_spec, arg_opts in getattr(action_fn, 'args', []):
                action_parser.add_argument(*arg_spec, **arg_opts)
            action_parser.set_defaults(action_fn=action_fn)
            action_parser.set_defaults(action_kwargs=[])
# Positional "category" argument; oslo.config builds the full sub-command
# parser tree through the add_command_parsers handler.
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 handler=add_command_parsers)
def get_arg_string(args):
    """Strip the option prefix from a declared argument name.

    (Note)zhiteng: the cfg module drives the actual ArgParser, so the
    prefix character is always '-': long options lose two dashes, short
    options one, and positional arguments are returned unchanged.
    """
    if args[0] == '-':
        return args[2:] if args[1] == '-' else args[1:]
    return args
def fetch_func_args(func):
    """Collect the parsed CLI value of each @args-declared option, in order."""
    return [getattr(CONF.category, get_arg_string(arg_spec[0]))
            for arg_spec, _arg_opts in getattr(func, 'args', [])]
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        # No category supplied: print usage plus the known categories.
        print(_("\nOpenStack manila version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)
    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project='manila',
             version=version.version_string())
        log.setup(CONF, "manila")
    except cfg.ConfigFilesNotFoundError:
        # Config file exists but is unreadable: re-exec the whole command
        # under sudo as the file's owner; otherwise ask for root and bail.
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))
        print(_('Please re-run manila-manage as root.'))
        sys.exit(2)
    # Dispatch to the selected category/action with its parsed arguments.
    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from . import models
class AutoRestResourceFlatteningTestServiceConfiguration(Configuration):
    """Configuration for AutoRestResourceFlatteningTestService

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param str base_url: Service URL
    """

    def __init__(self, base_url=None):
        # Default to the local test server when no URL is supplied.
        super(AutoRestResourceFlatteningTestServiceConfiguration,
              self).__init__(base_url or 'http://localhost')
        self.add_user_agent(
            'autorestresourceflatteningtestservice/{}'.format(VERSION))
class AutoRestResourceFlatteningTestService(object):
    """Resource Flattening for AutoRest.

    :ivar config: Configuration for client.
    :vartype config: AutoRestResourceFlatteningTestServiceConfiguration
    :param str base_url: Service URL
    """

    def __init__(self, base_url=None):
        self.config = AutoRestResourceFlatteningTestServiceConfiguration(base_url)
        self._client = ServiceClient(None, self.config)
        # Map of model-class name -> class, consumed by the (de)serializers.
        model_types = {name: value for name, value in models.__dict__.items()
                       if isinstance(value, type)}
        self.api_version = '1.0.0'
        self._serialize = Serializer(model_types)
        self._deserialize = Deserializer(model_types)

    # ---- shared request plumbing ----------------------------------------

    def _json_headers(self, custom_headers):
        """Build the JSON content-type header dict, merged with extras."""
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        if custom_headers:
            headers.update(custom_headers)
        return headers

    def _put(self, url, payload, payload_type, custom_headers, raw,
             operation_config):
        """PUT *payload* (serialized as *payload_type*) to *url*.

        Returns None, or a ClientRawResponse when *raw* is true; raises
        models.ErrorException on any non-200 status.
        """
        body = None
        if payload is not None:
            body = self._serialize.body(payload, payload_type)
        request = self._client.put(url, {})
        response = self._client.send(
            request, self._json_headers(custom_headers), body,
            **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        if raw:
            return ClientRawResponse(None, response)

    def _get(self, url, result_type, custom_headers, raw, operation_config):
        """GET *url* and deserialize the body as *result_type*.

        Returns the deserialized value, or a ClientRawResponse when *raw*
        is true; raises models.ErrorException on any non-200 status.
        """
        request = self._client.get(url, {})
        response = self._client.send(
            request, self._json_headers(custom_headers), **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize(result_type, response)
        if raw:
            return ClientRawResponse(deserialized, response)
        return deserialized

    def _send_simple_product(self, url, factory, simple_body_product,
                             custom_headers, raw, operation_config):
        """Send a SimpleProduct body via *factory* (put/post) and decode one back."""
        body = None
        if simple_body_product is not None:
            body = self._serialize.body(simple_body_product, 'SimpleProduct')
        request = factory(url, {})
        response = self._client.send(
            request, self._json_headers(custom_headers), body,
            **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SimpleProduct', response)
        if raw:
            return ClientRawResponse(deserialized, response)
        return deserialized

    # ---- public operations ----------------------------------------------

    def put_array(
            self, resource_array=None, custom_headers=None, raw=False, **operation_config):
        """Put External Resource as an Array.

        :param resource_array: External Resource as an Array to put
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: None or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._put('/model-flatten/array', resource_array, '[Resource]',
                         custom_headers, raw, operation_config)

    def get_array(
            self, custom_headers=None, raw=False, **operation_config):
        """Get External Resource as an Array.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: list of FlattenedProduct or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._get('/model-flatten/array', '[FlattenedProduct]',
                         custom_headers, raw, operation_config)

    def put_wrapped_array(
            self, resource_array=None, custom_headers=None, raw=False, **operation_config):
        """No need to have a route in Express server for this operation. Used to
        verify the type flattened is not removed if it's referenced in an
        array.

        :param resource_array: External Resource as an Array to put
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: None or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._put('/model-flatten/wrappedarray', resource_array,
                         '[WrappedProduct]', custom_headers, raw,
                         operation_config)

    def get_wrapped_array(
            self, custom_headers=None, raw=False, **operation_config):
        """No need to have a route in Express server for this operation. Used to
        verify the type flattened is not removed if it's referenced in an
        array.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: list of ProductWrapper or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._get('/model-flatten/wrappedarray', '[ProductWrapper]',
                         custom_headers, raw, operation_config)

    def put_dictionary(
            self, resource_dictionary=None, custom_headers=None, raw=False, **operation_config):
        """Put External Resource as a Dictionary.

        :param resource_dictionary: External Resource as a Dictionary to put
        :type resource_dictionary: dict
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: None or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._put('/model-flatten/dictionary', resource_dictionary,
                         '{FlattenedProduct}', custom_headers, raw,
                         operation_config)

    def get_dictionary(
            self, custom_headers=None, raw=False, **operation_config):
        """Get External Resource as a Dictionary.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: dict or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._get('/model-flatten/dictionary', '{FlattenedProduct}',
                         custom_headers, raw, operation_config)

    def put_resource_collection(
            self, resource_complex_object=None, custom_headers=None, raw=False, **operation_config):
        """Put External Resource as a ResourceCollection.

        :param resource_complex_object: External Resource as a
         ResourceCollection to put
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: None or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._put('/model-flatten/resourcecollection',
                         resource_complex_object, 'ResourceCollection',
                         custom_headers, raw, operation_config)

    def get_resource_collection(
            self, custom_headers=None, raw=False, **operation_config):
        """Get External Resource as a ResourceCollection.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: ResourceCollection or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._get('/model-flatten/resourcecollection',
                         'ResourceCollection', custom_headers, raw,
                         operation_config)

    def put_simple_product(
            self, simple_body_product=None, custom_headers=None, raw=False, **operation_config):
        """Put Simple Product with client flattening true on the model.

        :param simple_body_product: Simple body product to put
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: SimpleProduct or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        return self._send_simple_product(
            '/model-flatten/customFlattening', self._client.put,
            simple_body_product, custom_headers, raw, operation_config)

    def post_flattened_simple_product(
            self, product_id, max_product_display_name, description=None, generic_value=None, odatavalue=None, custom_headers=None, raw=False, **operation_config):
        """Put Flattened Simple Product with client flattening true on the
        parameter.

        :param product_id: Unique identifier representing a specific product
         for a given latitude & longitude. For example, uberX in San Francisco
         will have a different product_id than uberX in Los Angeles.
        :type product_id: str
        :param max_product_display_name: Display name of product.
        :type max_product_display_name: str
        :param description: Description of product.
        :type description: str
        :param generic_value: Generic URL value.
        :type generic_value: str
        :param odatavalue: URL value.
        :type odatavalue: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: SimpleProduct or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        # Re-assemble the flattened parameters into the body model.
        simple_body_product = None
        if (product_id is not None or description is not None
                or max_product_display_name is not None
                or generic_value is not None or odatavalue is not None):
            simple_body_product = models.SimpleProduct(
                product_id=product_id, description=description,
                max_product_display_name=max_product_display_name,
                generic_value=generic_value, odatavalue=odatavalue)
        return self._send_simple_product(
            '/model-flatten/customFlattening', self._client.post,
            simple_body_product, custom_headers, raw, operation_config)

    def put_simple_product_with_grouping(
            self, flatten_parameter_group, custom_headers=None, raw=False, **operation_config):
        """Put Simple Product with client flattening true on the model.

        :param flatten_parameter_group: Additional parameters for the
         operation
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: SimpleProduct or ClientRawResponse if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestsmodelflattening.models.ErrorException>`
        """
        # Unpack the parameter group (tolerating a missing group).
        name = None
        product_id = None
        description = None
        max_product_display_name = None
        generic_value = None
        odatavalue = None
        if flatten_parameter_group is not None:
            name = flatten_parameter_group.name
            product_id = flatten_parameter_group.product_id
            description = flatten_parameter_group.description
            max_product_display_name = flatten_parameter_group.max_product_display_name
            generic_value = flatten_parameter_group.generic_value
            odatavalue = flatten_parameter_group.odatavalue
        simple_body_product = None
        if (product_id is not None or description is not None
                or max_product_display_name is not None
                or generic_value is not None or odatavalue is not None):
            simple_body_product = models.SimpleProduct(
                product_id=product_id, description=description,
                max_product_display_name=max_product_display_name,
                generic_value=generic_value, odatavalue=odatavalue)
        url = self._client.format_url(
            '/model-flatten/customFlattening/parametergrouping/{name}/',
            name=self._serialize.url("name", name, 'str'))
        return self._send_simple_product(
            url, self._client.put, simple_body_product, custom_headers, raw,
            operation_config)
|
|
"""
Compare the performance of different model parameters
"""
# import packages
import toolbox
import GPy
import json
import os
import pandas as pd
from sklearn.metrics import mean_squared_error as MSE
from sklearn import neural_network
import matplotlib.pyplot as plt
#################################################################################################################
# neural network #
#################################################################################################################
"""
Compare the performance under different solver function and activation function
"""
# solver function
def generate_nn_solver_ratio_result(X_train, X_test, y_train, y_test):
    """Train one MLP regressor per solver and collect its predicted ratios.

    :param X_train: training feature matrix
    :param X_test: test feature matrix
    :param y_train: training target ratios
    :param y_test: test target ratios (stored as the 'ratio_baseline' column)
    :return: DataFrame with one prediction column per solver
    """
    # generate the result for random samples
    ratio_result = pd.DataFrame(y_test, columns=['ratio_baseline'])
    # identical hyper-parameters for every run so only the solver varies
    for solver in ('lbfgs', 'sgd', 'adam'):
        print("Solver Function: %s" % solver)
        model = neural_network.MLPRegressor(solver=solver, max_iter=1000, learning_rate_init=0.005)
        model.fit(X_train, y_train)
        ratio_result[solver] = model.predict(X_test)
    return ratio_result
def check_nn_solver_performance(output_test, ratio_result):
    """Score the per-solver predictions produced by generate_nn_solver_ratio_result.

    :param output_test: sequence where [1] is the baseline arrival time and
        [2] is the actual arrival time for the test samples
    :param ratio_result: DataFrame returned by generate_nn_solver_ratio_result
        (mutated in place: prediction columns are replaced by actual/pred ratios)
    :return: (time_result, mse_time, ratio_result, mse_ratio)
    """
    solvers = ('lbfgs', 'sgd', 'adam')
    # process the result to obtain the pred arrival time with different models
    time_result = pd.DataFrame(output_test[2], columns=['actual'])
    time_result['baseline'] = output_test[1]
    for solver in solvers:
        time_result[solver] = time_result['baseline'] * ratio_result[solver]
    # calculate the MSE of the arrival time (skip the ground-truth column)
    mse_time = dict()
    for column in time_result.columns:
        if column == 'actual':
            continue
        mse_time[column] = MSE(time_result['actual'], time_result[column])
    # process the result to obtain the ratio(actual_arrival_time / pred_arrival_time)
    for solver in solvers:
        ratio_result[solver] = ratio_result['ratio_baseline'] / ratio_result[solver]
    # calculate the MSE of ratio against the ideal ratio of 1.0
    true_ratio = [1.0] * len(ratio_result)
    mse_ratio = dict()
    for column in ratio_result.columns:
        mse_ratio[column] = MSE(true_ratio, ratio_result[column])
    return time_result, mse_time, ratio_result, mse_ratio
# activation function
def generate_nn_activation_ratio_result(X_train, X_test, y_train, y_test):
# generate the result for random samples
ratio_result = pd.DataFrame(y_test, columns=['ratio_baseline'])
print "Activation Function: identity"
model = neural_network.MLPRegressor(activation='identity', max_iter=1000, learning_rate_init=0.005)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
ratio_result['identity'] = y_pred
print "Activation Function: logistic"
model = neural_network.MLPRegressor(activation='logistic', max_iter=1000, learning_rate_init=0.005)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
ratio_result['logistic'] = y_pred
print "Activation Function: tanh"
model = neural_network.MLPRegressor(activation='tanh', max_iter=1000, learning_rate_init=0.005)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
ratio_result['tanh'] = y_pred
print "Activation Function: relu"
model = neural_network.MLPRegressor(activation='relu', max_iter=1000, learning_rate_init=0.005)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
ratio_result['relu'] = y_pred
return ratio_result
def check_nn_activation_performance(output_test, ratio_result):
    """Score the per-activation predictions from generate_nn_activation_ratio_result.

    :param output_test: sequence where [1] is the baseline arrival time and
        [2] is the actual arrival time for the test samples
    :param ratio_result: DataFrame returned by generate_nn_activation_ratio_result
        (mutated in place: prediction columns are replaced by actual/pred ratios)
    :return: (time_result, mse_time, ratio_result, mse_ratio)
    """
    activations = ('identity', 'logistic', 'tanh', 'relu')
    # process the result to obtain the pred arrival time with different models
    time_result = pd.DataFrame(output_test[2], columns=['actual'])
    time_result['baseline'] = output_test[1]
    for activation in activations:
        time_result[activation] = time_result['baseline'] * ratio_result[activation]
    # calculate the MSE of the arrival time (skip the ground-truth column)
    mse_time = dict()
    for column in time_result.columns:
        if column == 'actual':
            continue
        mse_time[column] = MSE(time_result['actual'], time_result[column])
    # process the result to obtain the ratio(actual_arrival_time / pred_arrival_time)
    for activation in activations:
        ratio_result[activation] = ratio_result['ratio_baseline'] / ratio_result[activation]
    # calculate the MSE of ratio against the ideal ratio of 1.0
    true_ratio = [1.0] * len(ratio_result)
    mse_ratio = dict()
    for column in ratio_result.columns:
        mse_ratio[column] = MSE(true_ratio, ratio_result[column])
    return time_result, mse_time, ratio_result, mse_ratio
#################################################################################################################
# gaussian process #
#################################################################################################################
"""
Compare the performance under different kernel function for gaussian process
"""
def generate_gaussian_ratio_result(X_train, X_test, y_train, y_test):
    """Train a sparse GP regressor per kernel/ARD combination and collect predictions.

    Kernels tried: RBF, Matern32, Matern52 and Linear, each with ARD on and off.
    Result columns are named 'GP_<kernel>_ARD' / 'GP_<kernel>_NoARD'.

    :param X_train: training feature matrix (6 features; input_dim=6 below)
    :param X_test: test feature matrix
    :param y_train: training target ratios
    :param y_test: test target ratios (stored as the 'ratio_baseline' column)
    :return: DataFrame with one prediction column per kernel configuration
    """
    # generate the result for random samples
    ratio_result = pd.DataFrame(y_test, columns=['ratio_baseline'])
    # GPy expects the targets as a column vector; reshape once
    y_column = y_train.reshape(len(y_train), 1)
    for kernel_name in ('RBF', 'Matern32', 'Matern52', 'Linear'):
        for ard in (True, False):
            print("Gaussian Process: %s, ARD=%s" % (kernel_name, ard))
            kernel = getattr(GPy.kern, kernel_name)(input_dim=6, ARD=ard)
            model = GPy.models.SparseGPRegression(X_train, y_column, kernel)
            model.optimize('bfgs')
            y_pred, y_var = model.predict(X_test)
            column = 'GP_%s_%s' % (kernel_name, 'ARD' if ard else 'NoARD')
            ratio_result[column] = y_pred
    return ratio_result
def check_gaussian_performance(output_test, ratio_result):
    """Score the per-kernel GP predictions from generate_gaussian_ratio_result.

    :param output_test: sequence where [1] is the baseline arrival time and
        [2] is the actual arrival time for the test samples
    :param ratio_result: DataFrame returned by generate_gaussian_ratio_result
        (mutated in place: prediction columns are replaced by actual/pred ratios)
    :return: (time_result, mse_time, ratio_result, mse_ratio)
    """
    gp_columns = (
        'GP_RBF_ARD', 'GP_RBF_NoARD',
        'GP_Matern32_ARD', 'GP_Matern32_NoARD',
        'GP_Matern52_ARD', 'GP_Matern52_NoARD',
        'GP_Linear_ARD', 'GP_Linear_NoARD',
    )
    # process the result to obtain the pred arrival time with different models
    time_result = pd.DataFrame(output_test[2], columns=['actual'])
    time_result['baseline'] = output_test[1]
    for column in gp_columns:
        time_result[column] = time_result['baseline'] * ratio_result[column]
    # calculate the MSE of the arrival time (skip the ground-truth column)
    mse_time = dict()
    for column in time_result.columns:
        if column == 'actual':
            continue
        mse_time[column] = MSE(time_result['actual'], time_result[column])
    # process the result to obtain the ratio(actual_arrival_time / pred_arrival_time)
    for column in gp_columns:
        ratio_result[column] = ratio_result['ratio_baseline'] / ratio_result[column]
    # calculate the MSE of ratio against the ideal ratio of 1.0
    true_ratio = [1.0] * len(ratio_result)
    mse_ratio = dict()
    for column in ratio_result.columns:
        mse_ratio[column] = MSE(true_ratio, ratio_result[column])
    return time_result, mse_time, ratio_result, mse_ratio
#################################################################################################################
# main functions #
#################################################################################################################
"""
Main interface for users
"""
def compare_models(dataset, generate_ratio_result_function, check_performance_function, save_path=None):
    """
    Do model selection for the neural network and gaussian process
    :param dataset: the dataframe for the dataset table
    :param generate_ratio_result_function: the function for training and prediction in model selection. There are three different choices here:
        ['generate_nn_solver_ratio_result', 'generate_nn_activation_ratio_result', 'generate_gaussian_ratio_result']
    :param check_performance_function: the function for performance assessment in model selection. There are three different choices here:
        ['check_nn_solver_performance', 'check_nn_activation_performance', 'check_gaussian_performance']
    :param save_path: directory in which to store the result csv/json files; created if missing
    :return: a list of result for model selection: time_result, mse_time, ratio_result, mse_ratio
    """
    plt.style.use('ggplot')
    dataset.reset_index(inplace=True)
    full_dataset = toolbox.preprocess_dataset(dataset)
    X_train, X_test, output_train, output_test = toolbox.split_dataset(full_dataset)
    # output_*[0] holds the target ratio used for training/evaluation
    y_train = output_train[0]
    y_test = output_test[0]
    ratio_result = generate_ratio_result_function(X_train, X_test, y_train, y_test)
    time_result, mse_time, ratio_result, mse_ratio = check_performance_function(output_test, ratio_result)
    if save_path is not None:
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        # os.path.join works whether or not save_path ends with a separator
        # (plain string concatenation silently produced 'dirfile.csv' otherwise)
        time_result.to_csv(os.path.join(save_path, 'time_result.csv'))
        ratio_result.to_csv(os.path.join(save_path, 'ratio_result.csv'))
        with open(os.path.join(save_path, 'mse_time.json'), 'w') as f:
            json.dump(mse_time, f)
        with open(os.path.join(save_path, 'mse_ratio.json'), 'w') as f:
            json.dump(mse_ratio, f)
    return time_result, mse_time, ratio_result, mse_ratio
|
|
from itertools import chain, groupby
import numpy as np
from ._base import Descriptor
from ._graph_matrix import DistanceMatrix
__all__ = (
"InformationContent",
"TotalIC",
"StructuralIC",
"BondingIC",
"ComplementaryIC",
"ModifiedIC",
"ZModifiedIC",
)
class BFSTree(object):
    """Breadth-first tree over a molecule's atoms.

    Used to build canonical neighborhood codes: ``get_code(i, order)``
    returns a sorted tuple of root-to-leaf paths (atom info interleaved
    with bond types) of the BFS tree rooted at atom ``i`` expanded
    ``order`` times.
    """

    __slots__ = ("tree", "visited", "bonds", "atoms")

    def __init__(self, mol):
        self.tree = {}
        self.visited = set()
        self.bonds = {}
        for b in mol.GetBonds():
            s = b.GetBeginAtomIdx()
            d = b.GetEndAtomIdx()
            t = b.GetBondType()
            # store the bond type under both orientations for O(1) lookup
            self.bonds[s, d] = t
            self.bonds[d, s] = t
        # (atomic number, degree, neighbor atoms) per atom index
        self.atoms = [
            (a.GetAtomicNum(), a.GetDegree(), a.GetNeighbors()) for a in mol.GetAtoms()
        ]

    def reset(self, i):
        """Re-root the tree at atom index ``i``."""
        self.tree.clear()
        self.visited.clear()
        self.tree[i] = ()
        self.visited.add(i)

    def expand(self):
        """Grow every leaf of the tree by one bond."""
        self._expand(self.tree)

    def _expand(self, tree):
        for src, dst in list(tree.items()):
            self.visited.add(src)
            # The empty tuple marks an unexpanded leaf.  The original code
            # tested `dst is ()`, which relies on CPython interning the empty
            # tuple and is a SyntaxWarning on Python 3.8+; equality is the
            # correct test (note: an exhausted-neighbor dict {} != ()).
            if dst == ():
                tree[src] = {
                    n.GetIdx(): ()
                    for n in self.atoms[src][2]
                    if n.GetIdx() not in self.visited
                }
            else:
                self._expand(dst)

    def _code(self, tree, before, trail):
        """Yield one code tuple per root-to-leaf path of ``tree``."""
        if len(tree) == 0:
            yield trail
        else:
            for src, dst in tree.items():
                code = []
                if before is not None:
                    # bond type connecting the parent atom to this one
                    bt = self.bonds[before, src]
                    code.append(bt)
                # (atomic number, degree) of the current atom
                code.append(self.atoms[src][:2])
                nxt = tuple(chain(trail, code))
                for t in self._code(dst, src, nxt):
                    yield t

    def get_code(self, i, order):
        """Return the canonical (sorted) neighborhood code of atom ``i``."""
        self.reset(i)
        for _ in range(order):
            self.expand()
        return tuple(sorted(self._code(self.tree, None, ())))
class InformationContentBase(Descriptor):
    """Common base class of the information-content descriptor family.

    Each concrete descriptor is parameterized by the neighborhood
    order (number of edges of the considered subgraph).
    """

    __slots__ = ("_order",)
    kekulize = True
    rtype = float

    def __init__(self, order=0):
        self._order = order

    def __str__(self):
        return self._name + str(self._order)

    @classmethod
    def preset(cls, version):
        # default preset: orders 0 through 5
        return (cls(o) for o in range(6))

    def parameters(self):
        return (self._order,)
class Ag(InformationContentBase):
    # Internal helper descriptor: partitions atoms into equivalence classes
    # by their order-N neighborhood code and returns a representative atom
    # index plus the size of each class.
    __slots__ = ("_order",)
    @classmethod
    def preset(cls, version):
        # not a user-facing descriptor, so it contributes nothing to presets
        return ()
    _name = "Ag"
    def dependencies(self):
        # NOTE(review): D is declared as a dependency but calculate() below
        # never reads it — presumably kept to force the same kekulized mol
        # preparation; confirm before removing.
        return {"D": DistanceMatrix(self.explicit_hydrogens)}
    def calculate(self, D):
        if self._order == 0:
            # order 0: atoms are equivalent iff they share an atomic number
            atoms = [a.GetAtomicNum() for a in self.mol.GetAtoms()]
        else:
            # higher orders: equivalence by canonical BFS neighborhood code
            tree = BFSTree(self.mol)
            atoms = [
                tree.get_code(i, self._order) for i in range(self.mol.GetNumAtoms())
            ]
        # code -> index of the LAST atom bearing that code (later atoms win)
        ad = {a: i for i, a in enumerate(atoms)}
        # groupby over the sorted codes yields (code, class size) pairs;
        # both output arrays below are built in this same sorted order so
        # they stay element-wise aligned
        Ags = [(k, sum(1 for _ in g)) for k, g in groupby(sorted(atoms))]
        Nags = len(Ags)
        return (
            np.fromiter((ad[k] for k, _ in Ags), "int", Nags),
            np.fromiter((ag for _, ag in Ags), "float", Nags),
        )
    rtype = None
def _shannon_entropy_term(a):
    """Single ``a * log2(a)`` term of the Shannon entropy sum."""
    return a * np.log2(a)


shannon_entropy_term = np.vectorize(_shannon_entropy_term)


def shannon_entropy(a, w=1):
    """Return the (optionally weighted) Shannon entropy of the counts ``a``."""
    probabilities = a / np.sum(a)
    return -np.sum(w * shannon_entropy_term(probabilities))
class InformationContent(InformationContentBase):
    r"""neighborhood information content descriptor.

    Shannon entropy of the partition of atoms into order-``m``
    neighborhood equivalence classes.

    :type order: int
    :param order: order(number of edge) of subgraph
    """

    since = "1.0.0"
    __slots__ = ()
    _name = "IC"

    def description(self):
        return "{}-ordered neighborhood information content".format(self._order)

    def dependencies(self):
        return {"iAgs": Ag(self._order)}

    def calculate(self, iAgs):
        # only the equivalence-class sizes matter for the plain entropy;
        # the representative atom indices are ignored here
        class_sizes = iAgs[1]
        return shannon_entropy(class_sizes)
class TotalIC(InformationContentBase):
    r"""neighborhood total information content descriptor.

    .. math::
        {\rm TIC}_m = A \cdot {\rm IC}_m

    :type order: int
    :param order: order(number of edge) of subgraph
    """

    since = "1.0.0"
    __slots__ = ()
    _name = "TIC"

    def description(self):
        return "{}-ordered neighborhood total information content".format(self._order)

    def dependencies(self):
        return {"ICm": InformationContent(self._order)}

    def calculate(self, ICm):
        # scale the information content by the number of atoms A
        atom_count = self.mol.GetNumAtoms()
        return atom_count * ICm
class StructuralIC(TotalIC):
    r"""structural information content descriptor.

    .. math::
        {\rm SIC}_m = \frac{{\rm IC}_m}{\log_2 A}

    :type order: int
    :param order: order(number of edge) of subgraph
    """

    since = "1.0.0"
    __slots__ = ()
    _name = "SIC"

    def description(self):
        return "{}-ordered structural information content".format(self._order)

    def calculate(self, ICm):
        # normalize by the maximum attainable entropy, log2(A)
        denominator = np.log2(self.mol.GetNumAtoms())
        with self.rethrow_zerodiv():
            return ICm / denominator
class BondingIC(TotalIC):
    r"""bonding information content descriptor.

    .. math::
        {\rm BIC}_m = \frac{{\rm IC}_m}{\log_2 \sum^B_{b=1} \pi^{*}_b}

    :type order: int
    :param order: order(number of edge) of subgraph
    :returns: NaN when :math:`\sum^B_{b=1} \pi^{*}_b <= 0`
    """

    since = "1.0.0"
    __slots__ = ()
    _name = "BIC"

    def description(self):
        return "{}-ordered bonding information content".format(self._order)

    def calculate(self, ICm):
        # total conventional bond order of the molecule
        bond_order_sum = sum(b.GetBondTypeAsDouble() for b in self.mol.GetBonds())
        with self.rethrow_zerodiv():
            log2_bonds = np.log2(bond_order_sum)
            return ICm / log2_bonds
class ComplementaryIC(TotalIC):
    r"""complementary information content descriptor.

    .. math::
        {\rm CIC}_m = \log_2 A - {\rm IC}_m

    :type order: int
    :param order: order(number of edge) of subgraph
    """

    since = "1.0.0"
    __slots__ = ()
    _name = "CIC"

    def description(self):
        return "{}-ordered complementary information content".format(self._order)

    def calculate(self, ICm):
        # distance of the entropy from its maximum, log2(A)
        return np.log2(self.mol.GetNumAtoms()) - ICm
class ModifiedIC(InformationContent):
    r"""modified information content index descriptor.

    Information content weighted by the atomic mass of each
    equivalence class's representative atom.

    :type order: int
    :param order: order(number of edge) of subgraph
    """

    since = "1.0.0"
    __slots__ = ()
    _name = "MIC"

    def description(self):
        return "{}-ordered modified information content".format(self._order)

    def calculate(self, iAgs):
        ids, Ags = iAgs
        # per-class weight: mass of the representative atom
        masses = np.vectorize(lambda i: self.mol.GetAtomWithIdx(int(i)).GetMass())(ids)
        return shannon_entropy(Ags, masses)
class ZModifiedIC(InformationContent):
    r"""Z-modified information content index descriptor.

    Information content weighted by class size times the atomic number
    of each equivalence class's representative atom.

    :type order: int
    :param order: order(number of edge) of subgraph
    """

    since = "1.0.0"
    __slots__ = ()
    _name = "ZMIC"

    def description(self):
        return "{}-ordered Z-modified information content".format(self._order)

    def calculate(self, iAgs):
        ids, Ags = iAgs
        # per-class weight: class size * atomic number of the representative
        atomic_numbers = np.vectorize(
            lambda i: self.mol.GetAtomWithIdx(int(i)).GetAtomicNum()
        )(ids)
        return shannon_entropy(Ags, Ags * atomic_numbers)
|
|
#!/usr/bin/env py
import os
import os.path
import sys
sys.path.append(os.getenv('HELIX_WORK_ROOT'))
import copy
"""
This is a temporary solution to gather h/w info from machine
the package is installed on end machines running perf
However this runs on a limited number of platforms
(but supports all the flavors we currently use for testing)
moving forward we will need a truly cross-plat solution here
"""
from cpuinfo import cpuinfo
import json
import re
import shutil
import socket
import subprocess
import urllib
import urlparse
import helix.azure_storage
import helix.depcheck
import helix.event
import helix.logs
import helix.saferequests
import platform
import psutil
import xunit
import zip_script
from helix.cmdline import command_main
from helix.io import copy_tree_to, ensure_directory_exists, fix_path
log = helix.logs.get_logger()
def _write_output_path(file_path, settings):
    """Copy or upload file_path to the configured output location.

    For a file:// output URI the file is copied into the output directory and
    the destination path is returned; otherwise it is uploaded to azure
    storage and the blob URL is returned.  On upload failure a FailedUpload
    event is reported and None is returned (implicitly).
    """
    (scheme, _, path, _, _, _) = urlparse.urlparse(settings.output_uri, 'file')
    if scheme.lower() == 'file':
        path = urllib.url2pathname(path)
        output_path = os.path.join(path, os.path.basename(file_path))
        shutil.copy2(file_path, output_path)
        return output_path
    else:
        try:
            fc = helix.azure_storage.get_upload_client(settings)
            url = fc.upload(file_path, os.path.basename(file_path))
            return url
        # `except ValueError, e` was Python-2-only syntax and `e` was unused;
        # `except ValueError:` is valid on both Python 2 and 3
        except ValueError:
            event_client = helix.event.create_from_uri(settings.event_uri)
            # message previously read "...file_pathafter retry" (missing space)
            event_client.error(settings, "FailedUpload", "Failed to upload " + file_path + " after retry", None)
def _prepare_execution_environment(settings, framework_in_tpa, assembly_list_name):
    """Create and populate the execution and core_root directories for a work item.

    :param framework_in_tpa: when True the framework binaries are copied to
        core_root (the trusted platform assemblies), otherwise next to the tests.
    :param assembly_list_name: name of the JSON assembly list inside the work item.
    """
    workitem_dir = fix_path(settings.workitem_working_dir)
    correlation_dir = fix_path(settings.correlation_payload_dir)
    # the correlation payload is the build drop; the work item dir is the test drop
    # (the original also computed unused xunit/corerun sub-paths — removed)
    build_drop = correlation_dir
    test_drop = workitem_dir
    assembly_list = os.path.join(test_drop, assembly_list_name)
    test_location = os.path.join(workitem_dir, 'execution')
    core_root = os.path.join(workitem_dir, 'core_root')
    ensure_directory_exists(test_location)
    ensure_directory_exists(core_root)
    log.info("Copying only test files from {} to {}".format(test_drop, test_location))
    copy_tree_to(test_drop, test_location)
    framework_target = core_root if framework_in_tpa else test_location
    log.info("Copying product binaries from {} to {}".format(build_drop, framework_target))
    _copy_package_files(assembly_list, build_drop, framework_target, core_root, test_location)
def _copy_files_to_dest(src, dest):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dest, item)
if os.path.isfile(s):
shutil.copy2(s, d)
def _prepare_linux_env_for_perf(correlation_dir, xunit_perf_drop, test_location, core_root):
    # Stage the dotnet-cli based perf runner on Linux: copy the runner and its
    # dependencies into the execution directory, install a local dotnet cli if
    # one is not already present, and place the runtime next to the test dlls.
    # copy over the cli runner and its dependencies for execution
    _copy_files_to_dest(xunit_perf_drop, test_location)
    xunit_perf_deps = os.path.join(correlation_dir, "Microsoft.DotNet.xunit.performance.run.core")
    # multiple versions of the package may be restored side by side; the first
    # directory listed is picked (os.listdir order — not necessarily newest)
    if len(os.listdir(xunit_perf_deps)) > 1:
        log.info('Multiple directories found under '+xunit_perf_deps+' picking '+os.listdir(xunit_perf_deps)[0])
    xunit_perf_deps = os.path.join(xunit_perf_deps, os.listdir(xunit_perf_deps)[0])
    xunit_perf_deps = os.path.join(xunit_perf_deps, "lib", "dotnet")
    log.info('Copying xunit perf dependencies from '+xunit_perf_deps)
    _copy_files_to_dest(xunit_perf_deps, test_location)
    dotnet_cli_dir = os.path.join(correlation_dir, "dotnet_cli")
    # if local dotnet cli is already installed, skip
    if not os.path.exists(dotnet_cli_dir):
        # install dotnet cli locally
        log.info('Local dotnet cli install not found, launching the insallation script')
        dotnet_installer = os.path.join(correlation_dir, "RunnerScripts", "xunitrunner-perf", "ubuntu-dotnet-local-install.sh")
        # the payload is extracted without the execute bit, so set it first
        log.info('Setting dotnet cli installation script at '+dotnet_installer+' as executable')
        helix.proc.run_and_log_output(("chmod 777 "+dotnet_installer).split(" "))
        log.info('Running script '+dotnet_installer)
        # -d: install directory, -v: file containing the desired cli version
        # NOTE(review): splitting on spaces breaks if any path contains a space
        helix.proc.run_and_log_output((dotnet_installer+" -d "+dotnet_cli_dir+" -v "+os.path.join(correlation_dir, "RunnerScripts", "xunitrunner-perf", "DotNetCliVersion.txt")).split(" "))
    else:
        log.info('Local dotnet cli install found')
    # for dotnet to execute the dll we need the runtime files right next to the dll we need to test
    _copy_files_to_dest(core_root, test_location)
def _prepare_windows_env_for_perf(xunit_perf_drop, test_location):
shutil.copy2(os.path.join(xunit_perf_drop, "xunit.performance.run.exe"), test_location)
shutil.copy2(os.path.join(xunit_perf_drop, "xunit.performance.metrics.dll"), test_location)
shutil.copy2(os.path.join(xunit_perf_drop, "xunit.performance.logger.exe"), test_location)
shutil.copy2(os.path.join(xunit_perf_drop, "xunit.runner.utility.desktop.dll"), test_location)
shutil.copy2(os.path.join(xunit_perf_drop, "ProcDomain.dll"), test_location)
shutil.copy2(os.path.join(xunit_perf_drop, "Microsoft.Diagnostics.Tracing.TraceEvent.dll"), test_location)
# used to copy the required xunit perf runner and its dependencies
# note that the perf runner will only be present for perf tests.
def _prepare_perf_execution_environment(settings, perf_runner, osgroup):
    # Stage the xunit-performance runner (Windows or Linux flavor) into the
    # execution directory and export CORE_ROOT for the runner process.
    correlation_dir = fix_path(settings.correlation_payload_dir)
    test_location = os.path.join(fix_path(settings.workitem_working_dir), 'execution')
    core_root = os.path.join(settings.workitem_working_dir, 'core_root')
    # the runner reads the runtime location from this environment variable
    os.environ['CORE_ROOT'] = core_root
    xunit_perf_drop = os.path.join(correlation_dir, perf_runner)
    if not os.path.exists(xunit_perf_drop):
        raise Exception("Failed to find perf runner {} in directory {}.".format(perf_runner, correlation_dir))
    # get the first subdir in the root and append it to xunit_perf_drop
    build_subdir = os.listdir(xunit_perf_drop)
    if len(build_subdir) > 1:
        log.info('Multiple directories found in '+xunit_perf_drop+' picking '+build_subdir[0])
    xunit_perf_drop = os.path.join(xunit_perf_drop, build_subdir[0])
    # package layout differs between the Windows runner and the netstandard one
    if perf_runner == 'Microsoft.DotNet.xunit.performance.runner.Windows':
        xunit_perf_drop = os.path.join(xunit_perf_drop, "tools")
    else:
        xunit_perf_drop = os.path.join(xunit_perf_drop, "lib", "netstandard1.3")
    log.info("Copying xunit perf drop from {} to {}.".format(xunit_perf_drop, test_location))
    if osgroup.lower().startswith('windows'):
        _prepare_windows_env_for_perf(xunit_perf_drop, test_location)
    elif osgroup.lower().startswith('linux'):
        _prepare_linux_env_for_perf(correlation_dir, xunit_perf_drop, test_location, core_root)
    else:
        # NOTE(review): only logs — execution continues with nothing staged
        log.error('Invalid osgroup '+osgroup)
    # copy the architecture specific subdirectories
    archSubdirs = os.listdir(xunit_perf_drop)
    for archSubdir in archSubdirs:
        if os.path.isdir(os.path.join(xunit_perf_drop, archSubdir)):
            shutil.copytree(os.path.join(xunit_perf_drop, archSubdir), os.path.join(test_location, archSubdir))
def _copy_package_files(assembly_list, build_drop, test_location, coreroot_location, execution_location):
    """Copy the assemblies named in the JSON assembly list out of the build drop.

    The list has three sections, each copied to its own destination:
    "corerun" -> coreroot_location, "xunit" -> execution_location,
    "testdependency" -> test_location.  Copy failures are fatal: the build
    drop contents are dumped to the log and the exception is re-raised.
    """
    log.info("Opening assembly list from {}".format(assembly_list))
    try:
        # `with` ensures the list file is closed (the original leaked the handle)
        with open(assembly_list) as assembly_list_file:
            assemblylist_obj = json.load(assembly_list_file)
        try:
            # the three sections were previously three identical copy loops
            destinations = (
                ("corerun", coreroot_location),
                ("xunit", execution_location),
                ("testdependency", test_location),
            )
            for section, dest_dir in destinations:
                for assembly_name in assemblylist_obj[section]:
                    # normalize both separator styles to the local one
                    assembly_name = assembly_name.replace("/", os.path.sep)
                    assembly_name = assembly_name.replace("\\", os.path.sep)
                    assembly_path = os.path.join(build_drop, assembly_name)
                    target_path = os.path.join(dest_dir, os.path.basename(assembly_name))
                    log.debug("Copying {} to {}".format(assembly_path, target_path))
                    shutil.copy2(assembly_path, target_path)
        except:
            # failed to copy a product file
            log.error("Failed to copy product binary, dumping contents of '{}'".format(build_drop))
            for root, dirs, files in os.walk(build_drop):
                for file in files:
                    log.info(os.path.join(root, file))
            # this is a fatal error so let it propagate
            raise
    except:
        # failure to find/parse the assembly list is also fatal
        raise
# does perf-specific tasks; converts results xml to csv and then to json, populates machine information and uploads json
def post_process_perf_results(settings, results_location, workitem_dir, xunit_test_type):
    # Converts the perf results xml to csv and then to json, enriches the json
    # with machine information, and uploads csv + json to the result stores.
    # Use the xunit perf analysis exe from nuget package here
    log.info('Converting xml to csv')
    payload_dir = fix_path(os.getenv('HELIX_CORRELATION_PAYLOAD'))
    xmlconvertorpath = ''
    if xunit_test_type == xunit.XUNIT_CONFIG_PERF_WINDOWS:
        # Windows: standalone analysis exe shipped in the nuget package
        perf_lib_dir = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis')
        if len(os.listdir(perf_lib_dir)) > 1:
            log.info('Multiple directories found under '+perf_lib_dir+' picking '+os.listdir(perf_lib_dir)[0])
        perf_analysis_version = os.listdir(perf_lib_dir)[0]
        xmlconvertorpath = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis', perf_analysis_version, 'tools', 'xunit.performance.analysis.exe')
    elif xunit_test_type == xunit.XUNIT_CONFIG_PERF_LINUX:
        # Linux: netstandard analysis dll driven by the local dotnet cli
        perf_lib_dir = os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis.cli')
        if len(os.listdir(perf_lib_dir)) > 1:
            log.info('Multiple directories found under '+perf_lib_dir+' picking '+os.listdir(perf_lib_dir)[0])
        perf_analysis_version = os.listdir(perf_lib_dir)[0]
        dotnet_cli_exec = os.path.join(settings.correlation_payload_dir, "dotnet_cli", "dotnet")
        _copy_files_to_dest(os.path.join(payload_dir, 'Microsoft.DotNet.xunit.performance.analysis.cli', perf_analysis_version, 'lib', 'netstandard1.3'), os.path.join(workitem_dir, 'execution'))
        # "converter path" here is really "<dotnet> <analysis dll>" as one string
        xmlconvertorpath = dotnet_cli_exec + ' ' + os.path.join(workitem_dir, 'execution', 'Microsoft.DotNet.xunit.performance.analysis.cli.dll')
    else:
        log.error('Invalid xunit_test_type')
        return
    xmlCmd = xmlconvertorpath+' -csv '+os.path.join(workitem_dir, 'results.csv')+' '+results_location
    if (helix.proc.run_and_log_output(xmlCmd.split(' '))) != 0:
        raise Exception('Failed to generate csv from result xml')
    log.info('Uploading the results.csv file')
    _write_output_path(os.path.join(workitem_dir, 'results.csv'), settings)
    perfscriptsdir = os.path.join(payload_dir, 'RunnerScripts', 'xunitrunner-perf')
    perfsettingsjson = ''
    # NOTE(review): the name `perfsettingsjson` is first the open file handle,
    # then rebound to the parsed dict — works, but confusing to read
    with open(os.path.join(perfscriptsdir, 'xunitrunner-perf.json'), 'rb') as perfsettingsjson:
        # read the perf-specific settings
        perfsettingsjson = json.loads(perfsettingsjson.read())
    # need to extract more properties from settings to pass to csvtojsonconvertor.py
    jsonFileName = perfsettingsjson['TestProduct']+'-'+settings.workitem_id+'.json'
    jsonPath = os.path.join(workitem_dir, jsonFileName)
    # command-line arguments for csvjsonconvertor.py, keyed by flag name
    jsonArgsDict = dict()
    jsonArgsDict['--csvFile'] = os.path.join(workitem_dir, 'results.csv')
    jsonArgsDict['--jsonFile'] = jsonPath
    jsonArgsDict['--jobName'] = settings.correlation_id
    jsonArgsDict['--jobDescription'] = '...'
    jsonArgsDict['--configName'] = perfsettingsjson['TargetQueue']
    jsonArgsDict['--jobGroupName'] = perfsettingsjson['Creator']+'-'+perfsettingsjson['TestProduct']+'-'+perfsettingsjson['Branch']+'-Perf'
    jsonArgsDict['--jobTypeName'] = 'Private'
    jsonArgsDict['--username'] = perfsettingsjson['Creator']
    jsonArgsDict['--userAlias'] = perfsettingsjson['Creator']
    # NOTE(review): --branch is populated from TestProduct, not Branch — confirm
    jsonArgsDict['--branch'] = perfsettingsjson['TestProduct']
    jsonArgsDict['--buildInfoName'] = perfsettingsjson['BuildMoniker']
    # extract build number from buildmoniker if official build
    buildtokens = perfsettingsjson['BuildMoniker'].split('-')
    if len(buildtokens) < 3:
        jsonArgsDict['--buildNumber'] = perfsettingsjson['BuildMoniker']
    else:
        jsonArgsDict['--buildNumber'] = buildtokens[-2] +'.'+buildtokens[-1]
    jsonArgsDict['--machinepoolName'] = perfsettingsjson['TargetQueue']
    jsonArgsDict['--machinepoolDescription'] = '...'
    jsonArgsDict['--microarchName'] = 'SSE2' # cannot be obtained by cpu-info; need to figure out some other way
    jsonArgsDict['--numberOfCores'] = psutil.cpu_count(logical=False)
    jsonArgsDict['--numberOfLogicalProcessors'] = psutil.cpu_count(logical=True)
    # psutil returns mem in bytes, convert it to MB for readability
    # NOTE(review): dividing by 1024 once yields KB, not the MB the comment
    # claims — confirm what the downstream consumer expects before changing
    jsonArgsDict['--totalPhysicalMemory'] = psutil.virtual_memory().total/1024
    jsonArgsDict['--osInfoName'] = platform.system()
    jsonArgsDict['--osVersion'] = platform.version()
    jsonArgsDict['--machineName'] = platform.node()
    info = cpuinfo.get_cpu_info()
    jsonArgsDict['--architectureName'] = format(info['arch'])
    jsonArgsDict['--machineDescription'] = format(info['brand'])
    jsonArgsDict['--manufacturerName'] = format(info['vendor_id'])
    jsonArgs = [sys.executable, os.path.join(perfscriptsdir, 'csvjsonconvertor.py')]
    # flatten the flag/value dict into an argv list (Python 2 iteritems)
    for key, value in jsonArgsDict.iteritems():
        jsonArgs.append(key)
        jsonArgs.append(str(value))
    if (helix.proc.run_and_log_output(jsonArgs)) != 0:
        raise Exception('Failed to generate json from csv file')
    # set info to upload result to perf-specific json container
    log.info('Uploading the results json')
    perfsettings = copy.deepcopy(settings)
    perfsettings.output_uri = perfsettingsjson['RootURI']
    perfsettings.output_write_token = perfsettingsjson['WriteToken']
    perfsettings.output_read_token = perfsettingsjson['ReadToken']
    jsonPath = str(jsonPath)
    # Upload json with rest of the results
    _write_output_path(jsonPath, settings)
    # Upload json to the perf specific container
    _write_output_path(jsonPath, perfsettings)
def _run_xunit_from_execution(settings, test_dll, xunit_test_type, args):
    """Run xunit against test_dll from the work item's execution directory.

    Uploads the results XML (and, for Windows perf runs, a zipped ETL trace),
    then sends an XUnitTestResult event with the parsed test count.
    Returns the xunit exit code; reports an error event when no results
    file was produced.
    """
    workitem_dir = fix_path(settings.workitem_working_dir)
    test_location = os.path.join(workitem_dir, 'execution')
    core_root = os.path.join(workitem_dir, 'core_root')
    results_location = os.path.join(workitem_dir, 'test_results.xml')
    event_client = helix.event.create_from_uri(settings.event_uri)
    log.info("Starting xunit against '{}'".format(test_dll))
    xunit_result = xunit.run_tests(
        settings,
        [test_dll],
        test_location,
        core_root,
        results_location,
        xunit_test_type,
        args
    )
    if xunit_test_type == xunit.XUNIT_CONFIG_PERF_WINDOWS or xunit_test_type == xunit.XUNIT_CONFIG_PERF_LINUX:
        # perf testing has special requirements on the test output file name.
        # make a copy of it in the expected location so we can report the result.
        perf_log = os.path.join(test_location, "latest-perf-build.xml")
        log.info("Copying {} to {}.".format(perf_log, results_location))
        shutil.copy2(perf_log, results_location)
        if xunit_test_type == xunit.XUNIT_CONFIG_PERF_WINDOWS:
            # only windows runs would generate the etl file
            # archive the ETL file and upload it
            etl_file = os.path.join(test_location, "latest-perf-build.etl")
            etl_zip = os.path.join(test_location, "latest-perf-build.zip")
            log.info("Compressing {} into {}".format(etl_file, etl_zip))
            zip_script.zipFilesAndFolders(etl_zip, [etl_file], True, True)
            log.info("Uploading ETL from {}".format(etl_zip))
            uploadlink = _write_output_path(etl_zip, settings)
            if uploadlink is not None:
                log.info('Blob uploaded at '+uploadlink)
    log.info("XUnit exit code: {}".format(xunit_result))
    if os.path.exists(results_location):
        log.info("Uploading results from {}".format(results_location))
        # BUGFIX: was `file(results_location)`, a Python 2-only builtin;
        # open() behaves identically and also works on Python 3.
        # The regex is compiled once, outside the scan loop.
        total_expression = re.compile(r'total="(\d+)"')
        test_count = 0
        with open(results_location) as result_file:
            for line in result_file:
                if '<assembly ' in line:
                    match = total_expression.search(line)
                    if match is not None:
                        test_count = int(match.groups()[0])
                    # only the first <assembly> element carries the total
                    break
        post_process_perf_results(settings, results_location, workitem_dir, xunit_test_type)
        result_url = _write_output_path(results_location, settings)
        log.info("Sending completion event")
        event_client.send(
            {
                'Type': 'XUnitTestResult',
                'WorkItemId': settings.workitem_id,
                'WorkItemFriendlyName': settings.workitem_friendly_name,
                'CorrelationId': settings.correlation_id,
                'ResultsXmlUri': result_url,
                'TestCount': test_count,
            }
        )
    else:
        log.error("Error: No exception thrown, but XUnit results not created")
        _report_error(settings)
    return xunit_result
def _report_error(settings):
    """Capture the in-flight exception, write it to error.log, upload the log,
    and send an XUnitTestFailure event to the Helix event service.

    Must be called from inside an ``except`` block (relies on sys.exc_info()).
    """
    from traceback import format_tb, format_exc
    log.error("Error running xunit {}".format(format_exc()))
    # BUGFIX: the originals `type` and `traceback` shadowed the builtin and
    # the stdlib module name; renamed to conventional exc_* locals.
    exc_type, exc_value, exc_tb = sys.exc_info()
    event_client = helix.event.create_from_uri(settings.event_uri)
    # BUGFIX: format_tb returns a list of strings; join it so the uploaded
    # log contains a readable traceback instead of the list's repr.
    formatted = ''.join(format_tb(exc_tb))
    workitem_dir = fix_path(settings.workitem_working_dir)
    error_path = os.path.join(workitem_dir, 'error.log')
    lines = ['Unhandled error: {}\n{}'.format(exc_value, formatted)]
    with open(error_path, 'w') as f:
        f.writelines(lines)
    error_url = _write_output_path(error_path, settings)
    log.info("Sending ToF test failure event")
    event_client.send(
        {
            'Type': 'XUnitTestFailure',
            'WorkItemId': settings.workitem_id,
            'WorkItemFriendlyName': settings.workitem_friendly_name,
            'CorrelationId': settings.correlation_id,
            'ErrorLogUri': error_url,
        }
    )
def run_tests(settings, test_dll, framework_in_tpa, assembly_list, perf_runner, osgroup, args):
    """Prepare the execution environment and run xunit for *test_dll*.

    Returns the xunit exit code, or 5 when a non-XUnit failure occurred
    (xunit itself only produces codes 0-4).
    """
    try:
        log.info("Running on '{}'".format(socket.gethostname()))
        _prepare_execution_environment(settings, framework_in_tpa, assembly_list)
        xunit_test_type = xunit.XUNIT_CONFIG_NETCORE
        if perf_runner is not None:
            # perf runs need extra setup and a platform-specific xunit config
            _prepare_perf_execution_environment(settings, perf_runner, osgroup)
            if perf_runner == 'Microsoft.DotNet.xunit.performance.runner.Windows':
                xunit_test_type = xunit.XUNIT_CONFIG_PERF_WINDOWS
            else:
                xunit_test_type = xunit.XUNIT_CONFIG_PERF_LINUX
        return _run_xunit_from_execution(settings, test_dll, xunit_test_type, args)
    except:
        # deliberate catch-all boundary: report the failure, then signal it
        _report_error(settings)
        # XUnit will now only return 0-4 for return codes.
        # so, use 5 to indicate a non-XUnit failure
        return 5
def main(args=None):
    """Command-line entry point: parse options and dispatch to run_tests."""
    def _main(settings, optlist, args):
        """
        Usage::
            xunitrunner
                [--config config.json]
                [--setting name=value]
                --dll Test.dll
        """
        optdict = dict(optlist)
        # check if a perf runner has been specified
        perf_runner = None
        assembly_list = None
        if '--perf-runner' in optdict:
            perf_runner = optdict['--perf-runner']
        if not os.path.exists(optdict['--dll']):
            # fall back to a .exe with the same base name as the missing .dll
            dllpath = optdict['--dll']
            exepath = '.'.join(dllpath.split('.')[:-1])
            exepath = exepath + '.exe'
            if not os.path.exists(exepath):
                raise Exception('No valid test dll or exe found')
            else:
                optdict['--dll'] = exepath
        osgroup = optdict['--osgroup']
        # default to windows
        if osgroup == '':
            osgroup = 'windows'
        if '--assemblylist' in optdict:
            assembly_list = optdict['--assemblylist']
            log.info("Using assemblylist parameter:"+assembly_list)
        else:
            assembly_list = os.getenv('HELIX_ASSEMBLY_LIST')
            # BUGFIX: string concatenation raised TypeError when
            # HELIX_ASSEMBLY_LIST was unset (os.getenv returned None);
            # str.format renders None safely.
            log.info('Using assemblylist environment variable:{}'.format(assembly_list))
        return run_tests(settings, optdict['--dll'], '--tpaframework' in optdict, assembly_list, perf_runner, osgroup, args)
    return command_main(_main, ['dll=', 'tpaframework', 'perf-runner=', 'assemblylist=', 'osgroup='], args)
# Script entry point: exit the process with the runner's return code.
if __name__ == '__main__':
    import sys
    sys.exit(main())
# Verify this module's helix dependencies are importable (runs at import time).
helix.depcheck.check_dependencies(__name__)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Task` class.
It is a central concept of Luigi and represents the state of the workflow.
See :doc:`/tasks` for an overview.
"""
try:
from itertools import imap as map
except ImportError:
pass
import logging
import traceback
import warnings
from luigi import six
from luigi import parameter
from luigi.task_register import Register, TaskClassException
Parameter = parameter.Parameter
logger = logging.getLogger('luigi-interface')
def namespace(namespace=None):
    """
    Set the namespace applied to tasks declared after this call.

    Calling it without arguments (or with ``None``) resets the namespace;
    doing so at the end of a file is recommended so the setting does not
    leak into task declarations in other files.

    Alternatively a Task can set the ``task_namespace`` property on itself,
    which has the advantage of not needing to be restored.

    .. code-block:: python

        class Task2(luigi.Task):
            task_namespace = 'namespace2'
    """
    setattr(Register, '_default_namespace', namespace)
def id_to_name_and_params(task_id):
    # DEPRECATED: thin delegating wrapper kept for backwards compatibility;
    # use luigi.tools.parse_task.id_to_name_and_params directly.
    import luigi.tools.parse_task
    return luigi.tools.parse_task.id_to_name_and_params(task_id)
class BulkCompleteNotImplementedError(NotImplementedError):
    """This is here to trick pylint.

    pylint thinks anything raising NotImplementedError needs to be implemented
    in any subclass. bulk_complete isn't like that. This tricks pylint into
    thinking that the default implementation is a valid implementation and
    not an abstract method."""
    pass
@six.add_metaclass(Register)
class Task(object):
    """
    This is the base class of all Luigi Tasks, the base unit of work in Luigi.
    A Luigi Task describes a unit of work.
    The key methods of a Task, which must be implemented in a subclass are:
    * :py:meth:`run` - the computation done by this task.
    * :py:meth:`requires` - the list of Tasks that this Task depends on.
    * :py:meth:`output` - the output :py:class:`Target` that this Task creates.
    Parameters to the Task should be declared as members of the class, e.g.::
    .. code-block:: python
        class MyTask(luigi.Task):
            count = luigi.IntParameter()
    Each Task exposes a constructor accepting all :py:class:`Parameter` (and
    values) as kwargs. e.g. ``MyTask(count=10)`` would instantiate `MyTask`.
    In addition to any declared properties and methods, there are a few
    non-declared properties, which are created by the :py:class:`Register`
    metaclass:
    ``Task.task_namespace``
      optional string which is prepended to the task name for the sake of
      scheduling. If it isn't overridden in a Task, whatever was last declared
      using `luigi.namespace` will be used.
    ``Task._parameters``
      list of ``(parameter_name, parameter)`` tuples for this task class
    """
    # Class-level registry of event callbacks, shaped as
    # {task_class: {event_name: set(callbacks)}}; filled by event_handler.
    _event_callbacks = {}
    #: Priority of the task: the scheduler should favor available
    #: tasks with higher priority values first.
    #: See :ref:`Task.priority`
    priority = 0
    disabled = False
    #: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the
    #: task requires 1 unit of the scp resource.
    resources = {}
    #: Number of seconds after which to time out the run function.
    #: No timeout if set to 0.
    #: Defaults to 0 or value in luigi.cfg
    worker_timeout = None
    @property
    def use_cmdline_section(self):
        ''' Property used by core config such as `--workers` etc.
        These will be exposed without the class as prefix.'''
        return True
    @classmethod
    def event_handler(cls, event):
        """
        Decorator for adding event handlers.

        The decorated callback is registered for *event* on this class (and,
        via isinstance checks in trigger_event, its subclasses' instances).
        """
        def wrapped(callback):
            cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback)
            return callback
        return wrapped
    def trigger_event(self, event, *args, **kwargs):
        """
        Trigger that calls all of the specified events associated with this class.

        Callback errors are logged and swallowed (except KeyboardInterrupt,
        which aborts the remaining callbacks).
        """
        for event_class, event_callbacks in six.iteritems(self._event_callbacks):
            if not isinstance(self, event_class):
                continue
            for callback in event_callbacks.get(event, []):
                try:
                    # callbacks are protected
                    callback(*args, **kwargs)
                except KeyboardInterrupt:
                    return
                except BaseException:
                    logger.exception("Error in event callback for %r", event)
    @property
    def task_module(self):
        ''' Returns what Python module to import to get access to this class. '''
        # TODO(erikbern): we should think about a language-agnostic mechanism
        return self.__class__.__module__
    @property
    def task_family(self):
        """
        Convenience method since a property on the metaclass isn't directly accessible through the class instances.
        """
        return self.__class__.task_family
    @classmethod
    def get_params(cls):
        """
        Returns all of the Parameters for this Task, as a list of
        ``(param_name, param_obj)`` tuples sorted by declaration order.
        """
        # We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically
        params = []
        for param_name in dir(cls):
            param_obj = getattr(cls, param_name)
            if not isinstance(param_obj, Parameter):
                continue
            params.append((param_name, param_obj))
        # The order the parameters are created matters. See Parameter class
        params.sort(key=lambda t: t[1].counter)
        return params
    @classmethod
    def get_param_values(cls, params, args, kwargs):
        """
        Get the values of the parameters from the args and kwargs.
        :param params: list of (param_name, Parameter).
        :param args: positional arguments
        :param kwargs: keyword arguments.
        :returns: list of `(name, value)` tuples, one for each parameter.
        :raises parameter.UnknownParameterException: too many positionals or
            an unrecognized keyword.
        :raises parameter.DuplicateParameterException: a keyword repeats a
            positional.
        :raises parameter.MissingParameterException: a required parameter has
            no value and no default.
        """
        result = {}
        params_dict = dict(params)
        task_name = cls.task_family
        # In case any exceptions are thrown, create a helpful description of how the Task was invoked
        # TODO: should we detect non-reprable arguments? These will lead to mysterious errors
        exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs)
        # Fill in the positional arguments
        positional_params = [(n, p) for n, p in params if p.positional]
        for i, arg in enumerate(args):
            if i >= len(positional_params):
                raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args)))
            param_name, param_obj = positional_params[i]
            result[param_name] = arg
        # Then the optional arguments
        for param_name, arg in six.iteritems(kwargs):
            if param_name in result:
                raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name))
            if param_name not in params_dict:
                raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name))
            result[param_name] = arg
        # Then use the defaults for anything not filled in
        for param_name, param_obj in params:
            if param_name not in result:
                if not param_obj.has_task_value(task_name, param_name):
                    raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name))
                result[param_name] = param_obj.task_value(task_name, param_name)
        def list_to_tuple(x):
            """ Make tuples out of lists and sets to allow hashing """
            if isinstance(x, list) or isinstance(x, set):
                return tuple(x)
            else:
                return x
        # Sort it by the correct order and make a list
        return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params]
    def __init__(self, *args, **kwargs):
        """
        Constructor to resolve values for all Parameters.
        For example, the Task:
        .. code-block:: python
            class MyTask(luigi.Task):
                count = luigi.IntParameter()
        can be instantiated as ``MyTask(count=10)``.
        """
        params = self.get_params()
        param_values = self.get_param_values(params, args, kwargs)
        # Set all values on class instance
        for key, value in param_values:
            setattr(self, key, value)
        # Register args and kwargs as an attribute on the class. Might be useful
        self.param_args = tuple(value for key, value in param_values)
        self.param_kwargs = dict(param_values)
        # Build up task id from the significant parameters only, e.g.
        # "MyTask(count=10)"; the hash is cached for __hash__.
        task_id_parts = []
        param_objs = dict(params)
        for param_name, param_value in param_values:
            if param_objs[param_name].significant:
                task_id_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
        self.task_id = '%s(%s)' % (self.task_family, ', '.join(task_id_parts))
        self.__hash = hash(self.task_id)
    def initialized(self):
        """
        Returns ``True`` if the Task is initialized and ``False`` otherwise.
        """
        return hasattr(self, 'task_id')
    @classmethod
    def from_str_params(cls, params_str=None):
        """
        Creates an instance from a str->str hash.
        :param params_str: dict of param name -> value.
        """
        # NOTE(review): a param missing from params_str raises KeyError here;
        # callers appear to be expected to provide every parameter.
        if params_str is None:
            params_str = {}
        kwargs = {}
        for param_name, param in cls.get_params():
            value = param.parse_from_input(param_name, params_str[param_name])
            kwargs[param_name] = value
        return cls(**kwargs)
    def to_str_params(self):
        """
        Convert all parameters to a str->str hash.
        """
        params_str = {}
        params = dict(self.get_params())
        for param_name, param_value in six.iteritems(self.param_kwargs):
            params_str[param_name] = params[param_name].serialize(param_value)
        return params_str
    def clone(self, cls=None, **kwargs):
        """
        Creates a new instance from an existing instance where some of the args have changed.
        There's at least two scenarios where this is useful (see test/clone_test.py):
        * remove a lot of boiler plate when you have recursive dependencies and lots of args
        * there's task inheritance and some logic is on the base class
        :param cls: target class (defaults to this instance's class).
        :param kwargs: parameter overrides applied on top of this instance's values.
        :return: a new instance of *cls*.
        """
        k = self.param_kwargs.copy()
        k.update(six.iteritems(kwargs))
        if cls is None:
            cls = self.__class__
        # Only forward parameters the target class actually declares.
        new_k = {}
        for param_name, param_class in cls.get_params():
            if param_name in k:
                new_k[param_name] = k[param_name]
        return cls(**new_k)
    def __hash__(self):
        # Precomputed from task_id in __init__.
        return self.__hash
    def __repr__(self):
        return self.task_id
    def __eq__(self, other):
        # Equality is by class plus resolved parameter values.
        return self.__class__ == other.__class__ and self.param_args == other.param_args
    def complete(self):
        """
        If the task has any outputs, return ``True`` if all outputs exists.
        Otherwise, return ``False``.
        However, you may freely override this method with custom logic.
        """
        outputs = flatten(self.output())
        if len(outputs) == 0:
            warnings.warn(
                "Task %r without outputs has no custom complete() method" % self,
                stacklevel=2
            )
            return False
        return all(map(lambda output: output.exists(), outputs))
    @classmethod
    def bulk_complete(cls, parameter_tuples):
        """
        Returns those of parameter_tuples for which this Task is complete.
        Override (with an efficient implementation) for efficient scheduling
        with range tools. Keep the logic consistent with that of complete().
        """
        raise BulkCompleteNotImplementedError()
    def output(self):
        """
        The output that this Task produces.
        The output of the Task determines if the Task needs to be run--the task
        is considered finished iff the outputs all exist. Subclasses should
        override this method to return a single :py:class:`Target` or a list of
        :py:class:`Target` instances.
        Implementation note
          If running multiple workers, the output must be a resource that is accessible
          by all workers, such as a DFS or database. Otherwise, workers might compute
          the same output since they don't see the work done by other workers.
        See :ref:`Task.output`
        """
        return []  # default impl
    def requires(self):
        """
        The Tasks that this Task depends on.
        A Task will only run if all of the Tasks that it requires are completed.
        If your Task does not require any other Tasks, then you don't need to
        override this method. Otherwise, a Subclasses can override this method
        to return a single Task, a list of Task instances, or a dict whose
        values are Task instances.
        See :ref:`Task.requires`
        """
        return []  # default impl
    def _requires(self):
        """
        Override in "template" tasks which themselves are supposed to be
        subclassed and thus have their requires() overridden (name preserved to
        provide consistent end-user experience), yet need to introduce
        (non-input) dependencies.
        Must return an iterable which among others contains the _requires() of
        the superclass.
        """
        return flatten(self.requires())  # base impl
    def process_resources(self):
        """
        Override in "template" tasks which provide common resource functionality
        but allow subclasses to specify additional resources while preserving
        the name for consistent end-user experience.
        """
        return self.resources  # default impl
    def input(self):
        """
        Returns the outputs of the Tasks returned by :py:meth:`requires`
        See :ref:`Task.input`
        :return: a list of :py:class:`Target` objects which are specified as
                 outputs of all required Tasks.
        """
        return getpaths(self.requires())
    def deps(self):
        """
        Internal method used by the scheduler.
        Returns the flattened list of requires.
        """
        # used by scheduler
        return flatten(self._requires())
    def run(self):
        """
        The task run method, to be overridden in a subclass.
        See :ref:`Task.run`
        """
        pass  # default impl
    def on_failure(self, exception):
        """
        Override for custom error handling.
        This method gets called if an exception is raised in :py:meth:`run`.
        Return value of this method is json encoded and sent to the scheduler as the `expl` argument. Its string representation will be used as the body of the error email sent out if any.
        Default behavior is to return a string representation of the stack trace.
        """
        traceback_string = traceback.format_exc()
        return "Runtime error:\n%s" % traceback_string
    def on_success(self):
        """
        Override for doing custom completion handling for a larger class of tasks
        This method gets called when :py:meth:`run` completes without raising any exceptions.
        The returned value is json encoded and sent to the scheduler as the `expl` argument.
        Default behavior is to send an None value"""
        pass
class MixinNaiveBulkComplete(object):
    """
    Enables a Task to be efficiently scheduled with e.g. range tools, by
    providing a bulk_complete implementation which simply checks completeness
    in a loop.
    Applicable to tasks whose completeness checking is cheap.
    This doesn't exploit output-location-specific APIs for speed advantage,
    but it still removes redundant scheduler roundtrips.
    """
    @classmethod
    def bulk_complete(cls, parameter_tuples):
        # Naive per-instance completeness check, preserving input order.
        complete_ones = []
        for params in parameter_tuples:
            if cls(params).complete():
                complete_ones.append(params)
        return complete_ones
def externalize(task):
    """
    Return *task* with its ``run`` method disabled, so it behaves like an
    :py:class:`ExternalTask`.
    """
    setattr(task, 'run', NotImplemented)
    return task
class ExternalTask(Task):
    """
    Subclass for references to external dependencies.
    An ExternalTask does not have a `run` implementation, which signifies to
    the framework that this Task's :py:meth:`output` is generated outside of
    Luigi.
    """
    # Assigning NotImplemented (instead of defining a method) marks the task
    # as not runnable by workers.
    run = NotImplemented
class WrapperTask(Task):
    """
    Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist.
    """
    def complete(self):
        # Complete iff every (flattened) requirement is complete; a wrapper
        # produces no outputs of its own.
        return all(r.complete() for r in flatten(self.requires()))
# A Config is behaviorally identical to a Task; subclassing reuses the
# Parameter resolution machinery for non-task configuration.
class Config(Task):
    """Used for configuration that's not specific to a certain task
    TODO: let's refactor Task & Config so that it inherits from a common
    ParamContainer base class
    """
    pass
def getpaths(struct):
    """
    Maps all Tasks in a structured data object to their .output().
    """
    if isinstance(struct, Task):
        return struct.output()
    if isinstance(struct, dict):
        # Recurse into each value, preserving the keys.
        return dict((key, getpaths(value)) for key, value in six.iteritems(struct))
    # Remaining case: assume the structure is iterable...
    try:
        elements = list(struct)
    except TypeError:
        raise Exception('Cannot map %s to Task/dict/list' % str(struct))
    return [getpaths(element) for element in elements]
def flatten(struct):
    """
    Creates a flat list of all items in structured output (dicts, lists, items):
    .. code-block:: python
        >>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
        ['bar', 'foo']
        >>> sorted(flatten(['foo', ['bar', 'troll']]))
        ['bar', 'foo', 'troll']
        >>> flatten('foo')
        ['foo']
        >>> flatten(42)
        [42]
    """
    if struct is None:
        return []
    if isinstance(struct, dict):
        # Flatten each value; keys are discarded.
        flat = []
        for _, value in six.iteritems(struct):
            flat.extend(flatten(value))
        return flat
    if isinstance(struct, six.string_types):
        # Strings are atomic here, not iterables of characters.
        return [struct]
    try:
        iterator = iter(struct)
    except TypeError:
        # Not iterable: a single atomic item.
        return [struct]
    flat = []
    for element in iterator:
        flat.extend(flatten(element))
    return flat
def flatten_output(task):
    """
    Lists all output targets by recursively walking output-less (wrapper) tasks.
    FIXME order consistently.
    """
    outputs = flatten(task.output())
    if outputs:
        return outputs
    # No outputs of its own: gather them from the requirements instead.
    for dep in flatten(task.requires()):
        outputs += flatten_output(dep)
    return outputs
|
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from unittest.util import safe_repr
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.http import HttpRequest
from django.test import TestCase
from guardian.shortcuts import assign_perm, remove_perm, get_perms
from rest_framework import status
from rest_framework.request import Request
from rest_framework.test import APIClient
logger = logging.getLogger(__name__)
def get_fake_request():
    """Build a DRF Request wrapping a minimal localhost Django HttpRequest."""
    django_request = HttpRequest()
    meta = django_request.META
    meta['SERVER_NAME'] = 'localhost.localdomain'
    meta['SERVER_PORT'] = 80
    return Request(django_request)
def group_has_perm(group, perm, obj=None):
    """Return True when *group* holds *perm* (object-level when *obj* is given,
    otherwise model-level via the group's permission set)."""
    if obj:
        return perm in get_perms(group, obj)
    return any(
        group_perm.codename == perm
        for group_perm in group.permissions.all()
    )
class PermissionsMixin(object):
    """Mixin that drives generic object-permission tests against an API endpoint.

    Subclasses configure ``permission_tests``, a dict with at least ``model``
    (class to instantiate) and ``endpoint`` (URL template formatted with the
    object's pk); ``permission_types``, ``permission`` and ``create_data``
    drive the individual assign/remove checks.
    """
    permission_tests = {}
    @classmethod
    def _error_check_permissions(cls):
        # Error checking: validate the permission_tests configuration.
        # Returns False when there is nothing to test, True when it looks OK,
        # and raises AssertionError on a malformed configuration.
        if not cls.permission_tests:
            # No tests, we'll just stop here - we don't want to fail
            return False
        if not isinstance(cls.permission_tests, dict):
            raise AssertionError('The `permission_tests` attribute must be a dict')
        if 'model' not in cls.permission_tests:
            raise AssertionError('You must specify a model to create an instance of')
        if 'endpoint' not in cls.permission_tests:
            raise AssertionError('You must specify an endpoint')
        # Things look OK
        return True
    def set_up_perms(self):
        # Create the object under test from the configured model and data.
        self.obj = self.permission_tests['model'](
            **self.permission_tests.get('create_data', {})
        )
        self.obj.save()
    def test_permissions(self):
        """
        Generic method to test permissions for each resource
        """
        if not self._error_check_permissions():
            # Just succeed immediately if necessary
            return
        self.client.login(username='test.user', password='1234')
        endpoint = self.permission_tests['endpoint'].format(self.obj.pk)
        # Iterate over the methods / permissions
        for perm_type in self.permission_tests['permission_types']:
            # Should fail now - no permission
            method = perm_type['method']
            response = getattr(self.client, method)(endpoint, perm_type.get('data', {}))
            self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
            # Assign permission
            assign_perm(self.permission_tests['permission'] % perm_type['perm'],
                        self.user, self.obj)
            # Should work now - permission granted
            response = getattr(self.client, method)(endpoint, perm_type.get('data', {}))
            expected_code = perm_type.get('code', status.HTTP_200_OK)
            self.assertEqual(response.status_code, expected_code)
            # Remove permission so the next iteration starts clean
            remove_perm(self.permission_tests['permission'] % perm_type['perm'],
                        self.user, self.obj)
    def _add_admin_object_permission(self):
        # Grant 'admin' object permission to test.user via the API (acting as
        # test.admin), then log back in as test.user.
        endpoint = self.permission_tests['endpoint'].format(self.obj.pk)
        endpoint += 'permissions/users/'
        self.client.login(username='test.admin', password='1234')
        # Grant permissions to test.user via the API (but poorly - test to make sure
        # there is an array of perms)
        response = self.client.post(endpoint, {'user': 'test.user', 'permissions': 'admin'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        # Now actually grant permissions
        response = self.client.post(endpoint, {'user': 'test.user', 'permissions': ['admin']})
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Make sure the user now has admin permissions
        self.assertTrue(self.user.has_perm(self.permission_tests['permission'] % 'admin', self.obj))
        self.client.login(username='test.user', password='1234')
    def test_add_object_permissions(self):
        # The permissions listing should 404 without access and 200 after the
        # admin permission has been granted.
        endpoint = self.permission_tests['endpoint'].format(self.obj.pk)
        endpoint += 'permissions/users/'
        self.client.login(username='test.user', password='1234')
        # Try hitting the user permissions endpoint
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        # Add admin permission
        self._add_admin_object_permission()
        # Now try again with permissions
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_partial_update_object_permissions(self):
        # Exercise PUT/PATCH on a single user's permission record.
        orig_endpoint = self.permission_tests['endpoint'].format(self.obj.pk)
        endpoint = orig_endpoint + 'permissions/users/test.user/'
        self.client.login(username='test.user', password='1234')
        # Try hitting the user permissions endpoint
        response = self.client.put(endpoint, {'user': 'test.user', 'permissions': 'blah'})
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        # Add admin permission
        self._add_admin_object_permission()
        # Now try again with permissions, but try with an invalid permission
        response = self.client.put(endpoint, {'user': 'test.user', 'permissions': ['blah']})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        # Now try updating for real
        response = self.client.patch(endpoint, {'user': 'test.user', 'permissions': ['view']})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue(self.user.has_perm(self.permission_tests['permission'] % 'view', self.obj))
        # Try grabbing the object
        response = self.client.get(orig_endpoint)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Make sure the endpoint properly shows the permissions
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['permissions'], ['admin', 'view'])
class StackdioTestCase(TestCase):
    """
    Base test case class for stackd.io. We'll add a client object, and create an admin and a
    regular user. We'll also create an 'everybody' group with permissions to view most of the
    endpoints.
    """
    def setUp(self):
        # Per-test fixtures: API client, the two users created in
        # setUpTestData, and membership of test.user in the shared group.
        self.client = APIClient()
        self.user = get_user_model().objects.get(username='test.user')
        self.admin = get_user_model().objects.get(username='test.admin')
        self.group = Group.objects.get(name='stackdio')
        self.user.groups.add(self.group)
        # Let mixins (e.g. PermissionsMixin) create their objects.
        if hasattr(self, 'set_up_perms'):
            self.set_up_perms()
    @classmethod
    def setUpTestData(cls):
        # Class-level fixtures, created once per TestCase class.
        user_model = get_user_model()
        user_model.objects.create_superuser('test.admin', 'test.admin@stackd.io', '1234',
                                            first_name='Test', last_name='Admin')
        user_model.objects.create_user('test.user', 'test.user@stackd.io', '1234',
                                       first_name='Test', last_name='User')
        Group.objects.create(name='stackdio')
    def assertCallable(self, obj, msg=None):
        """Same as self.assertTrue(callable(obj)), with a nicer
        default message."""
        if not callable(obj):
            standardMsg = '%s is not callable' % (safe_repr(obj))
            self.fail(self._formatMessage(msg, standardMsg))
|
|
"""Shared OS X support functions."""
import os
import re
import sys
# Public API of this helper module.
__all__ = [
    'compiler_fixup',
    'customize_config_vars',
    'customize_compiler',
    'get_platform_osx',
]
# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
                          'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
                          'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
                          'PY_CORE_CFLAGS')
# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
# prefix added to original configuration variable names
# (the pre-modification value is stored under _INITPRE + name)
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from succesful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
    """Find a build tool on the current path, falling back to xcrun; returns
    '' when the tool cannot be located."""
    found = _find_executable(toolname)
    if not found:
        found = _read_output("/usr/bin/xcrun -find %s" % (toolname,))
    return found or ''
# Cached "major.minor" OS X version string; None means "not determined yet",
# '' means "determined: not available on this host".
_SYSTEM_VERSION = None
def _get_system_version():
    """Return the OS X system version as a string"""
    # Reading this plist is a documented way to get the system
    # version (see the documentation for the Gestalt Manager)
    # We avoid using platform.mac_ver to avoid possible bootstrap issues during
    # the build of Python itself (distutils is used to build standard library
    # extensions).
    global _SYSTEM_VERSION
    if _SYSTEM_VERSION is None:
        _SYSTEM_VERSION = ''
        try:
            f = open('/System/Library/CoreServices/SystemVersion.plist')
        except IOError:
            # We're on a plain darwin box, fall back to the default
            # behaviour.
            pass
        else:
            try:
                # Only the "major.minor" prefix of the version is kept below.
                m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                              r'<string>(.*?)</string>', f.read())
            finally:
                f.close()
            if m is not None:
                _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
            # else: fall back to the default behaviour
    return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
    """Remove original unmodified values for testing"""
    # Needed by higher-level cross-platform tests of get_platform.
    # Snapshot the keys first: the dict is mutated while we delete.
    stale = [k for k in _config_vars if k.startswith(_INITPRE)]
    for k in stale:
        del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
    """Save modified and original unmodified value of configuration var"""
    original = _config_vars.get(cv, '')
    backup_key = _INITPRE + cv
    # Preserve the very first value only: later modifications must not
    # clobber the saved original.
    if original != newvalue and backup_key not in _config_vars:
        _config_vars[backup_key] = original
    _config_vars[cv] = newvalue
def _supports_universal_builds():
    """Returns True if universal builds are supported on this system"""
    # Approximation: on 10.4 or later we assume an Xcode environment
    # that understands -isysroot/-arch (i.e. universal builds); this is
    # in support of letting 10.4 universal builds run on 10.3.x systems.
    version_string = _get_system_version()
    if not version_string:
        return False
    try:
        parts = tuple(int(i) for i in version_string.split('.'))
    except ValueError:
        # Unparseable version string: be conservative.
        return False
    return bool(parts >= (10, 4))
def _find_appropriate_compiler(_config_vars):
    """Find appropriate C compiler for extension module builds.

    Mutates (and returns) ``_config_vars``: every compiler-related config
    var has its command word replaced when a replacement compiler is found.
    Raises SystemError when no working compiler can be located.
    """
    # Issue #13590:
    #    The OSX location for the compiler varies between OSX
    #    (or rather Xcode) releases. With older releases (up-to 10.5)
    #    the compiler is in /usr/bin, with newer releases the compiler
    #    can only be found inside Xcode.app if the "Command Line Tools"
    #    are not installed.
    #
    #    Furthermore, the compiler that can be used varies between
    #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
    #    as the compiler, after that 'clang' should be used because
    #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
    #    miscompiles Python.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # The CC config var might contain additional arguments.
    # Ignore them while searching.
    cc = oldcc = _config_vars['CC'].split()[0]
    if not _find_executable(cc):
        # Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command Line
        # Tools have been installed in / or if the user has provided
        # another location via CC).  If not found, try using xcrun
        # to find an uninstalled clang (within a selected Xcode).

        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself (and os.popen is
        # implemented on top of subprocess and is therefore not
        # usable as well)

        cc = _find_build_tool('clang')

    elif os.path.basename(cc).startswith('gcc'):
        # Compiler is GCC, check if it is LLVM-GCC
        data = _read_output("'%s' --version"
                             % (cc.replace("'", "'\"'\"'"),))
        # BUG FIX: _read_output returns None when the command fails;
        # the previous `'llvm-gcc' in data` would raise TypeError then.
        if data and 'llvm-gcc' in data:
            # Found LLVM-GCC, fall back to clang
            cc = _find_build_tool('clang')

    if not cc:
        raise SystemError(
               "Cannot locate working compiler")

    if cc != oldcc:
        # Found a replacement compiler.
        # Modify config vars using new compiler, if not already
        # explicitly overridden by an env variable, preserving
        # additional arguments.
        for cv in _COMPILER_CONFIG_VARS:
            if cv in _config_vars and cv not in os.environ:
                cv_split = _config_vars[cv].split()
                cv_split[0] = cc if cv != 'CXX' else cc + '++'
                _save_modified_value(_config_vars, cv, ' '.join(cv_split))

    return _config_vars
def _remove_universal_flags(_config_vars):
    """Remove all universal build arguments from config vars"""

    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            # Raw strings: '\s'/'\w' inside a plain literal is an invalid
            # escape sequence in Python 3 (SyntaxWarning, later an error).
            flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
            flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _remove_unsupported_archs(_config_vars):
    """Remove any unsupported archs from config vars"""
    # Different Xcode releases support different sets for '-arch'
    # flags. In particular, Xcode 4.x no longer supports the
    # PPC architectures.
    #
    # This code automatically removes '-arch ppc' and '-arch ppc64'
    # when these are not supported. That makes it possible to
    # build extensions on OSX 10.7 and later with the prebuilt
    # 32-bit installer on the python.org website.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # Raw strings: '\s' in a plain literal is an invalid escape
    # sequence in Python 3 (SyntaxWarning, later an error).
    if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself
        status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%(
            _config_vars['CC'].replace("'", "'\"'\"'"),))
        # The Apple compiler drivers return status 255 if no PPC
        if (status >> 8) == 255:
            # Compiler doesn't support PPC, remove the related
            # '-arch' flags if not explicitly overridden by an
            # environment variable
            for cv in _UNIVERSAL_CONFIG_VARS:
                if cv in _config_vars and cv not in os.environ:
                    flags = _config_vars[cv]
                    flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
                    _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _override_all_archs(_config_vars):
    """Allow override of all archs with ARCHFLAGS env var"""
    # NOTE: This name was introduced by Apple in OSX 10.5 and
    # is used by several scripting languages distributed with
    # that OS release.
    if 'ARCHFLAGS' in os.environ:
        arch = os.environ['ARCHFLAGS']
        for cv in _UNIVERSAL_CONFIG_VARS:
            if cv in _config_vars and '-arch' in _config_vars[cv]:
                flags = _config_vars[cv]
                # Raw string: '\s'/'\w' in a plain literal is an invalid
                # escape sequence in Python 3.
                flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                flags = flags + ' ' + arch
                _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _check_for_unavailable_sdk(_config_vars):
    """Remove references to any SDKs not available"""
    # On OSX 10.5 or later a user may try to build an extension against
    # an SDK that is not installed on the current machine; building with
    # no SDK beats failing outright.  This matters in particular for the
    # standalone Command Line Tools alternative to a full-blown Xcode
    # install, since the CLT packages do not provide SDKs.  If the SDK is
    # missing, headers and dev libraries are assumed to have been
    # installed to /usr and /System/Library by either a standalone CLT
    # package or the CLT component within Xcode.
    cflags = _config_vars.get('CFLAGS', '')
    m = re.search(r'-isysroot\s+(\S+)', cflags)
    if m is None:
        return _config_vars

    sdk = m.group(1)
    if os.path.exists(sdk):
        return _config_vars

    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def compiler_fixup(compiler_so, cc_args):
    """
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one them in extra_compile_flags.

    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore GCC will
    barf if multiple '-isysroot' arguments are present.

    :param compiler_so: list of compiler command words; a copy is modified
        and returned, the caller's list is left untouched
    :param cc_args: extra compile flags requested by the caller
    :returns: new list of compiler command words
    """
    stripArch = stripSysroot = False

    # Work on a copy so the caller's list is never mutated.
    compiler_so = list(compiler_so)
    if not _supports_universal_builds():
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        stripArch = stripSysroot = True
    else:
        # Only strip flags the caller is overriding through cc_args.
        stripArch = '-arch' in cc_args
        stripSysroot = '-isysroot' in cc_args

    if stripArch or 'ARCHFLAGS' in os.environ:
        # Remove every '-arch X' pair (there may be several).
        while True:
            try:
                index = compiler_so.index('-arch')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break

    if 'ARCHFLAGS' in os.environ and not stripArch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()

    if stripSysroot:
        # Remove every '-isysroot PATH' pair.
        while True:
            try:
                index = compiler_so.index('-isysroot')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break

    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    # cc_args takes precedence over compiler_so when locating the sysroot.
    if '-isysroot' in cc_args:
        idx = cc_args.index('-isysroot')
        sysroot = cc_args[idx+1]
    elif '-isysroot' in compiler_so:
        idx = compiler_so.index('-isysroot')
        sysroot = compiler_so[idx+1]

    if sysroot and not os.path.isdir(sysroot):
        # Missing SDK: warn but proceed (headers may live under /usr).
        from distutils import log
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                sysroot)
        log.warn("Please check your Xcode installation")

    return compiler_so
def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping
    containing name/value pairs parsed from the configured
    makefile used to build this interpreter.  Returns
    the mapping updated as needed to reflect the environment
    in which the interpreter is running; in the case of
    a Python from a binary installer, the installed
    environment may be very different from the build
    environment, i.e. different OS levels, different
    built tools, different available CPU architectures.

    This customization is performed whenever
    distutils.sysconfig.get_config_vars() is first
    called.  It may be used in environments where no
    compilers are present, i.e. when installing pure
    Python dists.  Customization of compiler paths
    and detection of unavailable archs is deferred
    until the first extension module build is
    requested (in distutils.sysconfig.customize_compiler).

    Currently called from distutils.sysconfig
    """

    if not _supports_universal_builds():
        # On Mac OS X before 10.4, check if -arch and -isysroot
        # are in CFLAGS or LDFLAGS and remove them if they are.
        # This is needed when building extensions on a 10.3 system
        # using a universal build of python.
        _remove_universal_flags(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    # Remove references to sdks that are not found
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars
def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first
    extension module build is requested
    in distutils.sysconfig.customize_compiler.
    """

    # Find a compiler to use for extension module builds
    _find_appropriate_compiler(_config_vars)

    # Remove ppc arch flags if not supported here
    _remove_unsupported_archs(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform()

    :returns: (osname, release, machine) tuple adjusted for OS X.
    """
    # called from get_platform() in sysconfig and distutils.util
    #
    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to. This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.
    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE+'CFLAGS',
                                    _config_vars.get('CFLAGS', ''))
        if ((macrelease + '.') >= '10.4.' and
            '-arch' in cflags.strip()):
            # The universal build will build fat binaries, but not on
            # systems before 10.4
            machine = 'fat'

            # Raw string: '\s'/'\S' in a plain literal is an invalid
            # escape sequence in Python 3.
            archs = re.findall(r'-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                   "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant.
            # BUG FIX: sys.maxint was removed in Python 3; sys.maxsize
            # gives the same 64-bit-detection semantics on both versions.
            if sys.maxsize >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxsize >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
|
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# tdot function courtesy of Ian Murray:
# Iain Murray, April 2013. iain contactable via iainmurray.net
# http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot.py
import numpy as np
from scipy import linalg, weave
import types
import ctypes
from ctypes import byref, c_char, c_int, c_double # TODO
import scipy
import warnings
import os
from config import config
import logging
# Parse scipy's (major, minor) version into a float array for the
# comparisons below.  np.array(..., dtype=...) replaces the old
# np.float64(list) call, which modern numpy rejects.
_scipyversion = np.array((scipy.__version__).split('.')[:2], dtype=np.float64)
# Older scipy builds need a workaround in dpotri (see dpotri below).
_fix_dpotri_scipy_bug = True
if np.all(_scipyversion >= np.array([0, 14])):
    from scipy.linalg import lapack
    _fix_dpotri_scipy_bug = False
elif np.all(_scipyversion >= np.array([0, 12])):
    #import scipy.linalg.lapack.clapack as lapack
    from scipy.linalg import lapack
else:
    from scipy.linalg.lapack import flapack as lapack

if config.getboolean('anaconda', 'installed') and config.getboolean('anaconda', 'MKL'):
    try:
        # Pull the BLAS symbols straight out of Anaconda's MKL runtime.
        anaconda_path = str(config.get('anaconda', 'location'))
        mkl_rt = ctypes.cdll.LoadLibrary(os.path.join(anaconda_path, 'DLLs', 'mkl_rt.dll'))
        dsyrk = mkl_rt.dsyrk
        dsyr = mkl_rt.dsyr
        _blas_available = True
        # Python-3-compatible print call (was a Python 2 print statement).
        print('anaconda installed and mkl is loaded')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed during import; any load failure simply
        # disables the fast BLAS path.
        _blas_available = False
else:
    try:
        _blaslib = ctypes.cdll.LoadLibrary(np.core._dotblas.__file__) # @UndefinedVariable
        dsyrk = _blaslib.dsyrk_
        dsyr = _blaslib.dsyr_
        _blas_available = True
    except AttributeError as e:
        _blas_available = False
        warnings.warn("warning: caught this exception:" + str(e))
def force_F_ordered_symmetric(A):
    """
    Return a Fortran-ordered version of A, assuming A is symmetric.
    """
    flags = A.flags
    if flags['F_CONTIGUOUS']:
        return A
    # For a symmetric matrix A.T equals A, and the transpose of a
    # C-ordered array is F-ordered — so this avoids a copy entirely.
    return A.T if flags['C_CONTIGUOUS'] else np.asfortranarray(A)
def force_F_ordered(A):
    """
    Return a Fortran-ordered version of A, assuming A is triangular.

    :param A: 2D array
    :returns: A itself when already F-contiguous, otherwise an
        F-ordered copy (after warning on stdout).
    """
    if A.flags['F_CONTIGUOUS']:
        return A
    # Python-3-compatible print call (was a Python 2 print statement).
    print("why are your arrays not F order?")
    return np.asfortranarray(A)
# def jitchol(A, maxtries=5):
# A = force_F_ordered_symmetric(A)
# L, info = lapack.dpotrf(A, lower=1)
# if info == 0:
# return L
# else:
# if maxtries==0:
# raise linalg.LinAlgError, "not positive definite, even with jitter."
# diagA = np.diag(A)
# if np.any(diagA <= 0.):
# raise linalg.LinAlgError, "not pd: non-positive diagonal elements"
# jitter = diagA.mean() * 1e-6
# return jitchol(A+np.eye(A.shape[0])*jitter, maxtries-1)
def jitchol(A, maxtries=5):
    """
    Lower Cholesky factorisation of A with progressively added jitter.

    A plain ``dpotrf`` is tried first; if A is not numerically positive
    definite, a small multiple of the identity is added (starting at
    ``mean(diag(A)) * 1e-6`` and growing tenfold per attempt, up to
    *maxtries* times) until the factorisation succeeds.

    :param A: square symmetric matrix
    :param maxtries: number of jitter escalations before giving up
    :returns: lower-triangular L with L L^T = A (+ jitter * I)
    :raises linalg.LinAlgError: if A has non-positive diagonal entries,
        or stays non-positive-definite even with jitter
    """
    A = np.ascontiguousarray(A)
    L, info = lapack.dpotrf(A, lower=1)
    if info == 0:
        return L

    diagA = np.diag(A)
    if np.any(diagA <= 0.):
        # Python-3-compatible raise (was `raise E, msg`).
        raise linalg.LinAlgError("not pd: non-positive diagonal elements")
    jitter = diagA.mean() * 1e-6
    num_tries = 0
    while num_tries < maxtries and np.isfinite(jitter):
        try:
            L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
            # Replaces the old debug `print jitter` and the unreachable
            # logging / ipdb.set_trace() debugger left after the raise.
            warnings.warn('Added jitter of {:.10e} to the diagonal'.format(jitter))
            return L
        except linalg.LinAlgError:
            # Narrowed from a bare `except:`; only a failed factorisation
            # should trigger another jitter escalation.
            jitter *= 10
        finally:
            num_tries += 1
    raise linalg.LinAlgError("not positive definite, even with jitter.")
# def dtrtri(L, lower=1):
# """
# Wrapper for lapack dtrtri function
# Inverse of L
#
# :param L: Triangular Matrix L
# :param lower: is matrix lower (true) or upper (false)
# :returns: Li, info
# """
# L = force_F_ordered(L)
# return lapack.dtrtri(L, lower=lower)
def dtrtrs(A, B, lower=1, trans=0, unitdiag=0):
    """
    Wrapper for the LAPACK ``dtrtrs`` routine.

    DTRTRS solves a triangular system of the form

        A * X = B  or  A**T * X = B,

    where A is a triangular matrix of order N, and B is an N-by-NRHS
    matrix.  A check is made to verify that A is nonsingular.

    :param A: triangular matrix A
    :param B: right-hand-side matrix B
    :param lower: A is lower (true) or upper (false) triangular
    :param trans: solve with A transposed when nonzero
    :param unitdiag: treat A's diagonal as all ones when nonzero
    :returns: (X, info) — the solution and the LAPACK status code
    """
    # LAPACK wants column-major storage for A; B apparently does not
    # need to be F-ordered.
    return lapack.dtrtrs(np.asfortranarray(A), B,
                         lower=lower, trans=trans, unitdiag=unitdiag)
def dpotrs(A, B, lower=1):
    """
    Wrapper for the LAPACK ``dpotrs`` routine.

    Solves A_full * X = B given the Cholesky factor A of A_full.

    :param A: Cholesky factor of the system matrix
    :param B: right-hand-side matrix
    :param lower: factor is lower (true) or upper (false) triangular
    :returns: (X, info)
    """
    return lapack.dpotrs(force_F_ordered(A), B, lower=lower)
def dpotri(A, lower=1):
    """
    Wrapper for the LAPACK ``dpotri`` routine.

    DPOTRI computes the inverse of a real symmetric positive definite
    matrix A using the Cholesky factorization A = U**T*U or A = L*L**T
    computed by DPOTRF.

    :param A: Cholesky factor of the matrix to invert
    :param lower: factor is lower (true) or upper (false) triangular
    :returns: (inverse, info); the inverse is symmetrified in place
    """
    if _fix_dpotri_scipy_bug:
        # Old scipy builds misinterpret the flag; insist on lower,
        # F-ordered input and flip it as a workaround.
        assert lower==1, "scipy linalg behaviour is very weird. please use lower, fortran ordered arrays"
        lower = 0

    R, info = lapack.dpotri(force_F_ordered(A), lower=lower) #needs to be zero here, seems to be a scipy bug
    # dpotri only fills one triangle; mirror it to get the full inverse.
    symmetrify(R)
    return R, info
def pddet(A):
    """
    Log-determinant of a positive definite (symmetric) matrix.

    Computed as twice the log-sum of the Cholesky factor's diagonal.
    """
    chol_factor = jitchol(A)
    return 2 * sum(np.log(np.diag(chol_factor)))
def trace_dot(a, b):
    """
    Efficiently compute trace(a @ b).

    The einsum contraction sums a[i, j] * b[j, i] directly, never
    materialising the full matrix product.
    """
    return np.einsum('ij,ji->', a, b)
def mdot(*args):
    """
    Multiply all the arguments using matrix product rules.
    The output is equivalent to multiplying the arguments one by one
    from left to right using dot().
    Precedence can be controlled by creating tuples of arguments,
    for instance mdot(a,((b,c),d)) multiplies a (a*((b*c)*d)).
    Note that this means the output of dot(a,b) and mdot(a,b) will differ if
    a or b is a pure tuple of numbers.
    """
    if len(args) == 1:
        return args[0]
    elif len(args) == 2:
        return _mdot_r(args[0], args[1])
    else:
        return _mdot_r(args[:-1], args[-1])

def _mdot_r(a, b):
    """Recursive helper for mdot"""
    # BUG FIX: types.TupleType is Python-2-only (removed in Python 3);
    # isinstance(..., tuple) is equivalent and also accepts subclasses.
    if isinstance(a, tuple):
        a = mdot(*a) if len(a) > 1 else a[0]
    if isinstance(b, tuple):
        b = mdot(*b) if len(b) > 1 else b[0]
    return np.dot(a, b)
def pdinv(A, *args):
    """
    Invert a positive definite matrix via its (jittered) Cholesky factor.

    :param A: A DxD pd numpy array
    :rval Ai: the inverse of A
    :rtype Ai: np.ndarray
    :rval L: the Cholesky decomposition of A
    :rtype L: np.ndarray
    :rval Li: the inverse of the Cholesky factor
    :rtype Li: np.ndarray
    :rval logdet: the log of the determinant of A
    :rtype logdet: float64
    """
    L = jitchol(A, *args)
    # log|A| = 2 * sum(log(diag(L)))
    logdet = 2.*np.sum(np.log(np.diag(L)))
    Li = dtrtri(L)
    Ai, _ = dpotri(L, lower=1)
    # dpotri fills only one triangle; mirror it in place.
    symmetrify(Ai)
    return Ai, L, Li, logdet
def dtrtri(L):
    """
    Invert a lower-triangular (Cholesky) matrix via LAPACK ``dtrtri``.

    :param L: lower triangular matrix
    :returns: the inverse of L
    """
    return lapack.dtrtri(force_F_ordered(L), lower=1)[0]
def multiple_pdinv(A):
    """
    Invert a stack of positive definite matrices.

    :param A: A DxDxN numpy array (each A[:,:,i] is pd)
    :rval invs: the inverses of A
    :rtype invs: np.ndarray
    :rval hld: 0.5* the log of the determinants of A
    :rtype hld: np.array
    """
    N = A.shape[-1]
    # BUG FIX: jitchol returns the 2-D lower factor itself; the previous
    # code indexed `L[0]` (the factor's first *row*), a leftover from a
    # jitchol version that returned a tuple — dpotri cannot accept a row.
    chols = [jitchol(A[:, :, i]) for i in range(N)]
    halflogdets = [np.sum(np.log(np.diag(L))) for L in chols]
    invs = [dpotri(L, True)[0] for L in chols]
    # Keep only the upper triangle and mirror it, guaranteeing symmetry.
    invs = [np.triu(I) + np.triu(I, 1).T for I in invs]
    return np.dstack(invs), np.array(halflogdets)
def pca(Y, input_dim):
    """
    Principal component analysis: maximum likelihood solution by SVD

    :param Y: NxD np.array of data
    :param input_dim: int, dimension of projection
    :rval X: - Nxinput_dim np.array of dimensionality reduced data
    :rval W: - input_dimxD mapping from X to Y
    """
    if not np.allclose(Y.mean(axis=0), 0.0):
        # Python-3-compatible print call (was a Python 2 print statement).
        print("Y is not zero mean, centering it locally (GPy.util.linalg.pca)")

    # Y -= Y.mean(axis=0)
    Z = linalg.svd(Y - Y.mean(axis=0), full_matrices=False)
    [X, W] = [Z[0][:, 0:input_dim], np.dot(np.diag(Z[1]), Z[2]).T[:, 0:input_dim]]
    # Rescale so the latent coordinates have unit standard deviation,
    # compensating in W to keep the reconstruction unchanged.
    v = X.std(axis=0)
    X /= v
    W *= v
    return X, W.T
def ppca(Y, Q, iterations=100):
    """
    EM implementation for probabilistic pca.

    :param array-like Y: Observed Data (NxD; NaN/inf entries are treated
        as missing via a masked array)
    :param int Q: Dimensionality for reduced array
    :param int iterations: number of iterations for EM
    :returns: (exp_x, W) — the NxQ latent means and the DxQ mapping
    """
    from numpy.ma import dot as madot
    N, D = Y.shape
    # Initialise W randomly
    W = np.random.randn(D, Q) * 1e-3
    # Mask invalid (NaN/inf) entries so missing data is ignored by the
    # masked dot products below.
    Y = np.ma.masked_invalid(Y, copy=0)
    mu = Y.mean(0)
    Ycentered = Y - mu
    try:
        for _ in range(iterations):
            # E-step: posterior mean of the latents given current W;
            # M-step: least-squares update of W given the latents.
            exp_x = np.asarray_chkfinite(np.linalg.solve(W.T.dot(W), madot(W.T, Ycentered.T))).T
            W = np.asarray_chkfinite(np.linalg.solve(exp_x.T.dot(exp_x), madot(exp_x.T, Ycentered))).T
    except np.linalg.linalg.LinAlgError:
        #"converged"
        # NOTE(review): if the very first solve raises, exp_x is still
        # unbound and the return below raises NameError — verify callers
        # never hit this path.
        pass
    return np.asarray_chkfinite(exp_x), np.asarray_chkfinite(W)
def tdot_numpy(mat, out=None):
    """Pure-numpy fallback for tdot: np.dot(mat, mat.T), optionally into *out*."""
    return np.dot(mat, mat.T, out)
def tdot_blas(mat, out=None):
    """returns np.dot(mat, mat.T), but faster for large 2D arrays of doubles."""
    if (mat.dtype != 'float64') or (len(mat.shape) != 2):
        # Only the double-precision 2D case is handled by DSYRK.
        return np.dot(mat, mat.T)
    nn = mat.shape[0]
    if out is None:
        out = np.zeros((nn, nn))
    else:
        assert(out.dtype == 'float64')
        assert(out.shape == (nn, nn))
        # FIXME: should allow non-contiguous out, and copy output into it:
        assert(8 in out.strides)
        # zeroing needed because of dumb way I copy across triangular answer
        out[:] = 0.0

    # # Call to DSYRK from BLAS
    # If already in Fortran order (rare), and has the right sorts of strides I
    # could avoid the copy. I also thought swapping to cblas API would allow use
    # of C order. However, I tried that and had errors with large matrices:
    # http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot_broken.py
    mat = np.asfortranarray(mat)
    # BUG FIX: c_char requires a bytes argument on Python 3 (b'n'/b'l'
    # behaves identically on Python 2).
    TRANS = c_char(b'n')
    N = c_int(mat.shape[0])
    K = c_int(mat.shape[1])
    LDA = c_int(mat.shape[0])
    UPLO = c_char(b'l')
    ALPHA = c_double(1.0)
    A = mat.ctypes.data_as(ctypes.c_void_p)
    BETA = c_double(0.0)
    C = out.ctypes.data_as(ctypes.c_void_p)
    # BUG FIX: floor division — plain '/' yields a float under Python 3's
    # true division, which c_int rejects.
    LDC = c_int(np.max(out.strides) // 8)
    dsyrk(byref(UPLO), byref(TRANS), byref(N), byref(K),
            byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC))

    # DSYRK fills only the lower triangle; mirror it to the upper half.
    symmetrify(out, upper=True)
    return np.ascontiguousarray(out)
def tdot(*args, **kwargs):
    """Compute mat @ mat.T via BLAS when available, else plain numpy."""
    impl = tdot_blas if _blas_available else tdot_numpy
    return impl(*args, **kwargs)
def DSYR_blas(A, x, alpha=1.):
    """
    Performs a symmetric rank-1 update operation (in place, via BLAS dsyr):
    A <- A + alpha * np.dot(x,x.T)

    :param A: Symmetric NxN np.array, modified in place
    :param x: Nx1 np.array
    :param alpha: scalar
    """
    N = c_int(A.shape[0])
    LDA = c_int(A.shape[0])
    # BUG FIX: c_char requires a bytes argument on Python 3 (b'l'
    # behaves identically on Python 2).
    UPLO = c_char(b'l')
    ALPHA = c_double(alpha)
    A_ = A.ctypes.data_as(ctypes.c_void_p)
    x_ = x.ctypes.data_as(ctypes.c_void_p)
    INCX = c_int(1)

    dsyr(byref(UPLO), byref(N), byref(ALPHA),
         x_, byref(INCX), A_, byref(LDA))

    # dsyr only updates the lower triangle; mirror it to the upper half.
    symmetrify(A, upper=True)
def DSYR_numpy(A, x, alpha=1.):
    """
    Performs a symmetric rank-1 update operation (in place):
    A <- A + alpha * np.dot(x,x.T)

    :param A: Symmetric NxN np.array, modified in place
    :param x: Nx1 np.array
    :param alpha: scalar
    """
    # np.outer(x, x) is the same rank-1 product as x[:, None] * x[None, :].
    A += alpha * np.outer(x, x)
def DSYR(*args, **kwargs):
    """Symmetric rank-1 update; BLAS-backed when available, numpy otherwise."""
    impl = DSYR_blas if _blas_available else DSYR_numpy
    return impl(*args, **kwargs)
def symmetrify(A, upper=False):
    """
    Take the square matrix A and make it symmetrical by copying elements
    from the lower half to the upper (or the reverse when ``upper``).
    Works IN PLACE.

    note: tries to use weave, falls back to a slower numpy version
    """
    if config.getboolean('weave', 'working'):
        try:
            symmetrify_weave(A, upper)
        except Exception:
            # Narrowed from a bare `except:`; any weave failure disables
            # weave for the rest of the session and falls back to numpy.
            # Python-3-compatible print call (was a Python 2 print statement).
            print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
            config.set('weave', 'working', 'False')
            symmetrify_numpy(A, upper)
    else:
        symmetrify_numpy(A, upper)
def symmetrify_weave(A, upper=False):
    """
    Take the square matrix A and make it symmetrical by copying elements
    from the lower half to the upper (or the reverse when ``upper``).
    Works IN PLACE, using inlined C via weave.
    """
    N, M = A.shape
    assert N == M

    # C kernel for C-contiguous memory: copy lower triangle onto upper.
    c_contig_code = """
    int iN;
    for (int i=1; i<N; i++){
      iN = i*N;
      for (int j=0; j<i; j++){
        A[i+j*N] = A[iN+j];
      }
    }
    """
    # C kernel for F-contiguous memory: copy upper triangle onto lower
    # (same flat-index arithmetic, opposite assignment direction).
    f_contig_code = """
    int iN;
    for (int i=1; i<N; i++){
      iN = i*N;
      for (int j=0; j<i; j++){
        A[iN+j] = A[i+j*N];
      }
    }
    """
    N = int(N) # for safe type casting
    # The kernel choice is swapped for F-contiguous arrays because the
    # same flat layout then addresses the transposed triangles.
    # NOTE(review): presumably the four branches pair (layout, direction)
    # correctly — confirm against symmetrify_numpy's semantics.
    if A.flags['C_CONTIGUOUS'] and upper:
        weave.inline(f_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
    elif A.flags['C_CONTIGUOUS'] and not upper:
        weave.inline(c_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
    elif A.flags['F_CONTIGUOUS'] and upper:
        weave.inline(c_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
    elif A.flags['F_CONTIGUOUS'] and not upper:
        weave.inline(f_contig_code, ['A', 'N'], extra_compile_args=['-O3'])
    else:
        # Non-contiguous fallback: rebuild A from the chosen triangle.
        if upper:
            tmp = np.tril(A.T)
        else:
            tmp = np.tril(A)
        A[:] = 0.0
        A += tmp
        A += np.tril(tmp, -1).T
def symmetrify_numpy(A, upper=False):
    """
    Force a square matrix to be symmetric, in place.

    When ``upper`` is true the upper triangle is the source; otherwise
    the lower triangle is copied onto the upper one.
    """
    strict_upper = np.triu_indices_from(A, k=1)
    if upper:
        # Writing through A.T places upper-triangle values into the
        # corresponding lower-triangle slots.
        A.T[strict_upper] = A[strict_upper]
    else:
        A[strict_upper] = A.T[strict_upper]
def cholupdate(L, x):
    """
    update the LOWER cholesky factor of a pd matrix IN PLACE

    if L is the lower chol. of K, then this function computes L\_
    where L\_ is the lower chol of K + x*x^T

    :param L: NxN lower Cholesky factor, modified in place
    :param x: length-N vector; a local copy is modified, the caller's
        array is untouched
    """
    support_code = """
    #include <math.h>
    """
    # Classic Givens-rotation rank-1 Cholesky update, column by column.
    code = """
    double r,c,s;
    int j,i;
    for(j=0; j<N; j++){
      r = sqrt(L(j,j)*L(j,j) + x(j)*x(j));
      c = r / L(j,j);
      s = x(j) / L(j,j);
      L(j,j) = r;
      for (i=j+1; i<N; i++){
        L(i,j) = (L(i,j) + s*x(i))/c;
        x(i) = c*x(i) - s*L(i,j);
      }
    }
    """
    # Copy so the rotation scratch work does not clobber the caller's x.
    x = x.copy()
    N = x.size
    # weave pulls N, L and x from the local scope; blitz converters give
    # the C code 2-D L(i,j) / 1-D x(i) indexing.
    weave.inline(code, support_code=support_code, arg_names=['N', 'L', 'x'], type_converters=weave.converters.blitz)
def backsub_both_sides(L, X, transpose='left'):
    """Return L^-T * X * L^-1, assuming X is symmetrical and L is lower Cholesky.

    With ``transpose='left'`` the transposed solves are used; otherwise the
    plain triangular solves are applied on both sides.
    """
    trans_flag = 1 if transpose == 'left' else 0
    half, _ = dtrtrs(L, X, lower=1, trans=trans_flag)
    return dtrtrs(L, half.T, lower=1, trans=trans_flag)[0].T
|
|
from pathlib import Path
from tempfile import NamedTemporaryFile
from unittest import TestCase
from unittest.mock import patch
import pytest
from parameterized import parameterized
from transformers import AutoConfig, AutoFeatureExtractor, AutoTokenizer, is_tf_available, is_torch_available
from transformers.onnx import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
ParameterFormat,
export,
validate_model_outputs,
)
if is_torch_available() or is_tf_available():
from transformers.onnx.features import FeaturesManager
from transformers.onnx.utils import compute_effective_axis_dimension, compute_serialized_parameters_size
from transformers.testing_utils import require_onnx, require_tf, require_torch, require_vision, slow
@require_onnx
class OnnxUtilsTestCaseV2(TestCase):
    """
    Cover all the utilities involved to export ONNX models
    """

    @require_torch
    @patch("transformers.onnx.convert.is_torch_onnx_dict_inputs_support_available", return_value=False)
    def test_ensure_pytorch_version_ge_1_8_0(self, mock_is_torch_onnx_dict_inputs_support_available):
        """
        Ensure we raise an Exception if the pytorch version is unsupported (< 1.8.0)
        """
        self.assertRaises(AssertionError, export, None, None, None, None, None)
        mock_is_torch_onnx_dict_inputs_support_available.assert_called()

    def test_compute_effective_axis_dimension(self):
        """
        When exporting ONNX model with dynamic axis (batch or sequence) we set batch_size and/or sequence_length = -1.
        We cannot generate an effective tensor with axis dim == -1, so we trick by using some "fixed" values
        (> 1 to avoid ONNX squeezing the axis).

        This test ensure we are correctly replacing generated batch / sequence tensor with axis > 1
        """

        # Dynamic axis (batch, no token added by the tokenizer)
        self.assertEqual(compute_effective_axis_dimension(-1, fixed_dimension=2, num_token_to_add=0), 2)

        # Static axis (batch, no token added by the tokenizer)
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=2, num_token_to_add=0), 2)

        # Dynamic axis (sequence, token added by the tokenizer 2 (no pair))
        # BUG FIX: the dynamic case must pass -1 (the dynamic-axis marker);
        # it previously duplicated the static 0 case on both lines.
        self.assertEqual(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2), 6)
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=2), 6)

        # Dynamic axis (sequence, token added by the tokenizer 3 (pair))
        self.assertEqual(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=3), 5)
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=3), 5)

    def test_compute_parameters_serialized_size(self):
        """
        This test ensures we compute a "correct" approximation of the underlying storage requirement (size) for all the
        parameters for the specified parameter's dtype.
        """
        self.assertEqual(compute_serialized_parameters_size(2, ParameterFormat.Float), 2 * ParameterFormat.Float.size)

    def test_flatten_output_collection_property(self):
        """
        This test ensures we correctly flatten nested collection such as the one we use when returning past_keys.
        past_keys = Tuple[Tuple]

        ONNX exporter will export nested collections as ${collection_name}.${level_idx_0}.${level_idx_1}...${idx_n}
        """
        self.assertEqual(
            OnnxConfig.flatten_output_collection_property("past_key", [[0], [1], [2]]),
            {
                "past_key.0": 0,
                "past_key.1": 1,
                "past_key.2": 2,
            },
        )
class OnnxConfigTestCaseV2(TestCase):
    """
    Cover the test for models default.

    Default means no specific features is being enabled on the model.
    """

    @patch.multiple(OnnxConfig, __abstractmethods__=set())
    def test_use_external_data_format(self):
        """
        External data format is required only if the serialized size of the parameters if bigger than 2Gb
        """
        TWO_GB_LIMIT = EXTERNAL_DATA_FORMAT_SIZE_LIMIT

        # Below the threshold: no parameters, a single parameter, and
        # just under 2Gb worth of float parameters.
        for num_params in (0, 1, (TWO_GB_LIMIT - 1) // ParameterFormat.Float.size):
            self.assertFalse(OnnxConfig.use_external_data_format(num_params))

        # At the threshold and just above it.
        for num_params in (TWO_GB_LIMIT, (TWO_GB_LIMIT + 1) // ParameterFormat.Float.size):
            self.assertTrue(OnnxConfig.use_external_data_format(num_params))
class OnnxConfigWithPastTestCaseV2(TestCase):
    """
    Cover the tests for model which have use_cache feature (i.e. "with_past" for ONNX)
    """

    SUPPORTED_WITH_PAST_CONFIGS = {}
    # SUPPORTED_WITH_PAST_CONFIGS = {
    #     ("BART", BartConfig),
    #     ("GPT2", GPT2Config),
    #     # ("T5", T5Config)
    # }

    @patch.multiple(OnnxConfigWithPast, __abstractmethods__=set())
    def test_use_past(self):
        """
        Ensure the use_past variable is correctly being set
        """
        for name, config in OnnxConfigWithPastTestCaseV2.SUPPORTED_WITH_PAST_CONFIGS:
            with self.subTest(name):
                self.assertFalse(
                    OnnxConfigWithPast.from_model_config(config()).use_past,
                    "OnnxConfigWithPast.from_model_config() should not use_past",
                )

                self.assertTrue(
                    OnnxConfigWithPast.with_past(config()).use_past,
                    # BUG FIX: this message named the wrong constructor
                    # (copy-paste from the assertion above).
                    "OnnxConfigWithPast.with_past() should use_past",
                )

    @patch.multiple(OnnxConfigWithPast, __abstractmethods__=set())
    def test_values_override(self):
        """
        Ensure the use_past variable correctly set the `use_cache` value in model's configuration
        """
        for name, config in OnnxConfigWithPastTestCaseV2.SUPPORTED_WITH_PAST_CONFIGS:
            with self.subTest(name):
                # without past
                onnx_config_default = OnnxConfigWithPast.from_model_config(config())
                self.assertIsNotNone(onnx_config_default.values_override, "values_override should not be None")
                self.assertIn("use_cache", onnx_config_default.values_override, "use_cache should be present")
                self.assertFalse(
                    onnx_config_default.values_override["use_cache"], "use_cache should be False if not using past"
                )

                # with past
                onnx_config_default = OnnxConfigWithPast.with_past(config())
                self.assertIsNotNone(onnx_config_default.values_override, "values_override should not be None")
                self.assertIn("use_cache", onnx_config_default.values_override, "use_cache should be present")
                self.assertTrue(
                    onnx_config_default.values_override["use_cache"],
                    # BUG FIX: this message stated the inverted condition
                    # (copy-paste from the assertFalse above).
                    "use_cache should be True if using past",
                )
# (model type, checkpoint name) pairs exercised by the PyTorch export tests below.
PYTORCH_EXPORT_MODELS = {
    ("albert", "hf-internal-testing/tiny-albert"),
    ("bert", "bert-base-cased"),
    ("ibert", "kssteven/ibert-roberta-base"),
    ("camembert", "camembert-base"),
    ("distilbert", "distilbert-base-cased"),
    ("electra", "google/electra-base-generator"),
    ("roberta", "roberta-base"),
    ("xlm-roberta", "xlm-roberta-base"),
    ("layoutlm", "microsoft/layoutlm-base-uncased"),
    ("vit", "google/vit-base-patch16-224"),
}
# Decoder-only models exported with past key/values (use_cache) support.
PYTORCH_EXPORT_WITH_PAST_MODELS = {
    ("gpt2", "gpt2"),
    ("gpt-neo", "EleutherAI/gpt-neo-125M"),
}
# Encoder-decoder models exported with past key/values support.
PYTORCH_EXPORT_SEQ2SEQ_WITH_PAST_MODELS = {
    ("bart", "facebook/bart-base"),
    ("mbart", "sshleifer/tiny-mbart"),
    ("t5", "t5-small"),
    ("marian", "Helsinki-NLP/opus-mt-en-de"),
    ("m2m-100", "facebook/m2m100_418M"),
}
# TensorFlow counterparts of the lists above (smaller coverage: no electra/vit/m2m-100).
TENSORFLOW_EXPORT_DEFAULT_MODELS = {
    ("albert", "hf-internal-testing/tiny-albert"),
    ("bert", "bert-base-cased"),
    ("ibert", "kssteven/ibert-roberta-base"),
    ("camembert", "camembert-base"),
    ("distilbert", "distilbert-base-cased"),
    ("roberta", "roberta-base"),
    ("xlm-roberta", "xlm-roberta-base"),
    ("layoutlm", "microsoft/layoutlm-base-uncased"),
}
TENSORFLOW_EXPORT_WITH_PAST_MODELS = {
    ("gpt2", "gpt2"),
    ("gpt-neo", "EleutherAI/gpt-neo-125M"),
}
TENSORFLOW_EXPORT_SEQ2SEQ_WITH_PAST_MODELS = {
    ("bart", "facebook/bart-base"),
    ("mbart", "sshleifer/tiny-mbart"),
    ("t5", "t5-small"),
    ("marian", "Helsinki-NLP/opus-mt-en-de"),
}
def _get_models_to_test(export_models_list):
    """Expand (model type, checkpoint) pairs into parameterized.expand test cases.

    Each supported feature of each model type yields one
    (test_name, name, model, feature, onnx_config_class_constructor) tuple.
    """
    if not (is_torch_available() or is_tf_available()):
        # Returning some dummy test that should not be ever called because of the @require_torch / @require_tf
        # decorators.
        # The reason for not returning an empty list is because parameterized.expand complains when it's empty.
        return [("dummy", "dummy", "dummy", "dummy", OnnxConfig.from_model_config)]

    models_to_test = []
    for name, model in export_models_list:
        supported_features = FeaturesManager.get_supported_features_for_model_type(name)
        for feature, onnx_config_class_constructor in supported_features.items():
            models_to_test.append((f"{name}_{feature}", name, model, feature, onnx_config_class_constructor))
    return sorted(models_to_test)
class OnnxExportTestCaseV2(TestCase):
    """
    Integration tests ensuring supported models are correctly exported
    """

    def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        # Shared driver: instantiate the model for the requested feature, export it
        # to ONNX in a temp file, then validate the exported graph's outputs.
        from transformers.onnx import export

        model_class = FeaturesManager.get_model_class_for_feature(feature)
        config = AutoConfig.from_pretrained(model_name)
        model = model_class.from_config(config)
        onnx_config = onnx_config_class_constructor(model.config)

        # Some configs require a newer PyTorch than the environment may have.
        if is_torch_available():
            from transformers.file_utils import torch_version

            if torch_version < onnx_config.torch_onnx_minimum_version:
                pytest.skip(
                    f"Skipping due to incompatible PyTorch version. Minimum required is {onnx_config.torch_onnx_minimum_version}, got: {torch_version}"
                )

        # Check the modality of the inputs and instantiate the appropriate preprocessor
        if model.main_input_name == "input_ids":
            preprocessor = AutoTokenizer.from_pretrained(model_name)
            # Useful for causal lm models that do not use pad tokens.
            if not getattr(config, "pad_token_id", None):
                config.pad_token_id = preprocessor.eos_token_id
        elif model.main_input_name == "pixel_values":
            preprocessor = AutoFeatureExtractor.from_pretrained(model_name)
        else:
            raise ValueError(f"Unsupported model input name: {model.main_input_name}")

        with NamedTemporaryFile("w") as output:
            try:
                onnx_inputs, onnx_outputs = export(
                    preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path(output.name)
                )
                validate_model_outputs(
                    onnx_config,
                    preprocessor,
                    model,
                    Path(output.name),
                    onnx_outputs,
                    onnx_config.atol_for_validation,
                )
            except (RuntimeError, ValueError) as e:
                # Surface export/validation failures as test failures with context.
                self.fail(f"{name}, {feature} -> {e}")

    # Each test below is just the shared driver parameterized over one model list,
    # gated by the relevant framework/vision requirements.

    @parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS))
    @slow
    @require_torch
    @require_vision
    def test_pytorch_export(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    @parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_WITH_PAST_MODELS))
    @slow
    @require_torch
    def test_pytorch_export_with_past(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    @parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_SEQ2SEQ_WITH_PAST_MODELS))
    @slow
    @require_torch
    def test_pytorch_export_seq2seq_with_past(
        self, test_name, name, model_name, feature, onnx_config_class_constructor
    ):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    @parameterized.expand(_get_models_to_test(TENSORFLOW_EXPORT_DEFAULT_MODELS))
    @slow
    @require_tf
    @require_vision
    def test_tensorflow_export(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    @parameterized.expand(_get_models_to_test(TENSORFLOW_EXPORT_WITH_PAST_MODELS))
    @slow
    @require_tf
    def test_tensorflow_export_with_past(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    @parameterized.expand(_get_models_to_test(TENSORFLOW_EXPORT_SEQ2SEQ_WITH_PAST_MODELS))
    @slow
    @require_tf
    def test_tensorflow_export_seq2seq_with_past(
        self, test_name, name, model_name, feature, onnx_config_class_constructor
    ):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)
|
|
"""
Copyright 2014 Matt Heitzenroder
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Python wrapper exposes the capabilities of the AOSONG AM2315 humidity
and temperature sensor.
The datasheet for the device can be found here:
http://www.adafruit.com/datasheets/AM2315.pdf
Portions of this code were inspired by Joehrg Ehrsam's am2315-python-api
code. http://code.google.com/p/am2315-python-api/
This library was originally authored by Sopwith:
http://sopwith.ismellsmoke.net/?p=104
"""
import quick2wire.i2c as i2c
import time
import array
import math
class Sensor:
    """Wrapper for an AOSONG AM2315 humidity and temperature sensor.

    Provides simple access to an AM2315 chip using the quickwire i2c module.

    Attributes:
        channel: Int containing the smbus channel.
        address: AM2315 bus address.
        bus: quickwire i2c object instance.
        lastError: String describing the most recent error, or None.
        debug: bool containing debug state.
    """

    def __init__(self, address=0x5C, debug=False):
        self.channel = self.pi_i2c_bus_number()  # 0 for pi Rev-1, 1 for pi Rev-2
        self.address = address                   # Default address 0x5C
        self.bus = i2c.I2CMaster()               # quick2wire master
        self.lastError = None                    # Contains last error string
        self.debug = debug                       # Debug flag

    def pi_revision(self):
        """Get the version number of the Raspberry Pi board.

        Returns:
            An int containing the Pi board revision (1 or 2).
            If error, returns 0.
        """
        return i2c.revision()

    def pi_i2c_bus_number(self):
        """Get the I2C bus number /dev/i2c-<n>: 1 on Rev-2+ boards, else 0."""
        if i2c.revision() > 1:
            return 1
        return 0

    def data(self):
        """Read the humidity and temperature from the AM2315.

        Returns:
            Tuple (humidity, temperature Celsius, temperature Fahrenheit)
            as floats, or None on error (see last_error() for the reason).
        """
        data = None
        # Send a wakeup call to the sensor.  The chip sleeps between reads, so
        # this first transaction is expected to fail -- ignore any error.
        try:
            self.bus.transaction(i2c.writing(self.address, bytes([0x03, 0x0, 0x04])))
        except Exception:
            pass
        time.sleep(0.125)
        # Now that the device is awake, issue the read command
        # (function 0x03, start register 0x00, 4 data bytes) and read 8 bytes back.
        try:
            self.bus.transaction(i2c.writing(self.address, bytes([0x03, 0x0, 0x04])))
            data = self.bus.transaction(i2c.reading(self.address, 0x08))
            data = bytearray(data[0])
        except IOError as e:
            self.lastError = 'I/O Error({0}): {1}'.format(e.errno, e.strerror)
            return None
        # Reply must echo the command (0x03) and the data byte count (0x04).
        # Fix: the original used `and` (only failed when BOTH bytes were wrong)
        # and *called* self.lastError instead of assigning it (TypeError).
        if data[0] != 0x03 or data[1] != 0x04:
            self.lastError = 'Error reading data from AM2315 device.'
            return None
        # Verify the CRC before trusting the payload; the AM2315 transmits the
        # CRC low byte first (data[6] low, data[7] high).
        # Fix: removed a leftover `assert(0)` that crashed on every CRC error.
        crc = 256 * data[7] + data[6]
        if crc != self.verify_crc(data[:6]):
            self.lastError = 'CRC error in sensor data.'
            return None
        # Parse the payload
        humid_H = data[2]
        humid_L = data[3]
        temp_H = data[4]
        temp_L = data[5]
        humidity = (humid_H * 256 + humid_L) / 10
        # Check for negative temp: the sign flag is the MSB of the temperature
        # high byte.  Fix: the original tested 0x08, which is inconsistent with
        # the 0x7F mask below and can never fire for real readings.
        # (16-Sep-2014: thanks to Ethan, ethansimpson@xtra.co.nz, for the
        # original negative-temperature report.)
        negative = bool(temp_H & 0x80)
        temp_H &= 0x7F  # Mask the sign flag
        tempC = (temp_H * 256 + temp_L) / 10
        tempF = self.c_to_f(tempC)
        if negative:
            tempC = -abs(tempC)
            tempF = -abs(tempF)
        return (humidity, tempC, tempF)

    def humidity(self):
        """Read humidity from the sensor.

        Returns:
            float humidity reading, None if error.
        """
        time.sleep(.25)
        reading = self.data()
        # Fix: reuse the reading just taken; the original called self.data()
        # a second time just to index the tuple, doubling bus traffic and
        # potentially returning a different sample.
        if reading is not None:
            return reading[0]
        return None

    def temperature(self, fahrenheit=False):
        """Read temperature from the sensor (Celsius is default).

        Args:
            fahrenheit: if True return the temperature in Fahrenheit.
        Returns:
            float temperature reading, None if error.
        """
        time.sleep(.25)
        reading = self.data()  # single sensor read, reused below (see humidity)
        if reading is None:
            return None
        return reading[2] if fahrenheit else reading[1]

    def fahrenheit(self):
        """Convenience wrapper: temperature in Fahrenheit."""
        return self.temperature(True)

    def celsius(self):
        """Convenience wrapper: temperature in Celsius."""
        return self.temperature()

    def verify_crc(self, char):
        """Return the 16-bit CRC (Modbus polynomial 0xA001) of the given bytes."""
        crc = 0xFFFF
        for byte in char:
            crc ^= byte
            for _ in range(8):
                if crc & 0x01:
                    crc = (crc >> 1) ^ 0xA001
                else:
                    crc >>= 1
        return crc

    def c_to_f(self, celsius):
        """Convert Celsius to Fahrenheit, truncated to one decimal place.

        Args:
            celsius: numeric temperature in Celsius.
        Returns:
            float Fahrenheit value (int 32 for input 0), None on error.
        """
        if celsius is None:
            return None
        if celsius == 0:
            return 32
        try:
            tempF = float((celsius * 9 / 5) + 32)
            return (math.trunc(tempF * 10)) / 10
        except (TypeError, ValueError):
            # Narrowed from a bare except: record the failure and report None.
            self.lastError = 'Error converting %s celsius to fahrenheit' % celsius
            return None

    def last_error(self):
        """Return the message describing the most recent error, or None."""
        return self.lastError
|
|
import socket, struct
from common import *
from binascii import hexlify, unhexlify
def enum(**enums):
    """Build an anonymous attribute-container class from keyword constants (ad-hoc enum)."""
    members = dict(enums)
    return type('Enum', (), members)
class TCPGecko:
    """Client for the Wii U TCPGecko debugging protocol (TCP port 7331).

    Every request is a one-byte command id followed by big-endian (">")
    struct-packed arguments; replies are read back on the same socket.
    NOTE(review): recv(n) is assumed to return exactly n bytes; short reads
    are not handled -- confirm this holds for the target environment.
    """

    def __init__(self, *args):
        # args[0] is the console's IP address (or hostname).
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        print("Connecting to " + str(args[0]) + ":7331")
        self.s.connect((str(args[0]), 7331)) #IP, 1337 reversed, Cafiine uses 7332+
        print("Connected!")

    def readmem(self, address, length): #Number of bytes
        # Read `length` bytes of console memory starting at `address`.
        # Reads larger than 0x400 bytes are split into 0x400-byte chunks.
        # Reply status 0xbd carries data; 0xb0 means the range reads as zeros.
        if length == 0: raise BaseException("Reading memory requires a length (# of bytes)")
        if not self.validrange(address, length): raise BaseException("Address range not valid")
        if not self.validaccess(address, length, "read"): raise BaseException("Cannot read from address")
        ret = b""
        if length > 0x400:
            print("Length is greater than 0x400 bytes, need to read in chunks")
            print("Start address: " + hexstr0(address))
            for i in range(int(length / 0x400)): #Number of blocks, ignores extra
                self.s.send(b"\x04") #cmd_readmem
                request = struct.pack(">II", address, address + 0x400)
                self.s.send(request)
                status = self.s.recv(1)
                if status == b"\xbd": ret += self.s.recv(0x400)
                elif status == b"\xb0": ret += b"\x00" * 0x400
                else: raise BaseException("Something went terribly wrong")
                address += 0x400;length -= 0x400
                print("Current address: " + hexstr0(address))
            if length != 0: #Now read the last little bit
                self.s.send(b"\x04")
                request = struct.pack(">II", address, address + length)
                self.s.send(request)
                status = self.s.recv(1)
                if status == b"\xbd": ret += self.s.recv(length)
                elif status == b"\xb0": ret += b"\x00" * length
                else: raise BaseException("Something went terribly wrong")
            print("Finished!")
        else:
            # Single-request path for reads of 0x400 bytes or less.
            self.s.send(b"\x04")
            request = struct.pack(">II", address, address + length)
            self.s.send(request)
            status = self.s.recv(1)
            if status == b"\xbd": ret += self.s.recv(length)
            elif status == b"\xb0": ret += b"\x00" * length
            else: raise BaseException("Something went terribly wrong")
        return ret

    def readkern(self, address): #Only takes 4 bytes, may need to run multiple times
        # Read one 32-bit word via the kernel (cmd 0x0C).
        # NOTE(review): this READ checks "write" access and raises "Cannot
        # write to address" -- looks copy-pasted from writekern; confirm
        # whether "read" was intended.
        if not self.validrange(address, 4): raise BaseException("Address range not valid")
        if not self.validaccess(address, 4, "write"): raise BaseException("Cannot write to address")
        self.s.send(b"\x0C") #cmd_readkern
        request = struct.pack(">I", int(address))
        self.s.send(request)
        value = struct.unpack(">I", self.s.recv(4))[0]
        return value

    def writekern(self, address, value): #Only takes 4 bytes, may need to run multiple times
        # Write one 32-bit word via the kernel.
        if not self.validrange(address, 4): raise BaseException("Address range not valid")
        if not self.validaccess(address, 4, "write"): raise BaseException("Cannot write to address")
        self.s.send(b"\x0B") #cmd_writekern (0x0B; the original comment said readkern)
        print(value)
        request = struct.pack(">II", int(address), int(value))
        self.s.send(request)
        return

    def pokemem(self, address, value): #Only takes 4 bytes, may need to run multiple times
        # Write one 32-bit word in user space (cmd 0x03); no reply expected.
        if not self.validrange(address, 4): raise BaseException("Address range not valid")
        if not self.validaccess(address, 4, "write"): raise BaseException("Cannot write to address")
        self.s.send(b"\x03") #cmd_pokemem
        request = struct.pack(">II", int(address), int(value))
        self.s.send(request) #Done, move on
        return

    def search32(self, address, value, size):
        # Search `size` bytes starting at `address` for the 32-bit `value`;
        # returns the address reported by the console.
        self.s.send(b"\x72") #cmd_search32
        request = struct.pack(">III", address, value, size)
        self.s.send(request)
        reply = self.s.recv(4)
        return struct.unpack(">I", reply)[0]

    def getversion(self):
        # Return the console OS version word (cmd 0x9A).
        self.s.send(b"\x9A") #cmd_os_version
        reply = self.s.recv(4)
        return struct.unpack(">I", reply)[0]

    def writestr(self, address, string):
        # Write a (UTF-8 encoded, NUL-padded to 4 bytes) string into console
        # memory one 32-bit poke at a time.
        if not self.validrange(address, len(string)): raise BaseException("Address range not valid")
        if not self.validaccess(address, len(string), "write"): raise BaseException("Cannot write to address")
        if type(string) != bytes: string = bytes(string, "UTF-8") #Sanitize
        if len(string) % 4: string += bytes((4 - (len(string) % 4)) * b"\x00")
        pos = 0
        for x in range(int(len(string) / 4)):
            self.pokemem(address, struct.unpack(">I", string[pos:pos + 4])[0])
            address += 4;pos += 4
        return

    def memalign(self, size, align):
        # Allocate `size` bytes with `align` alignment on the default heap by
        # calling MEMAllocFromDefaultHeapEx through its function pointer.
        symbol = self.get_symbol("coreinit.rpl", "MEMAllocFromDefaultHeapEx", True, 1)
        symbol = struct.unpack(">I", symbol.address)[0]
        address = self.readmem(symbol, 4)
        #print("memalign address: " + hexstr0(struct.unpack(">I", address)[0]))
        ret = self.call(address, size, align)
        return ret

    def freemem(self, address):
        # Free memory previously obtained from memalign().
        symbol = self.get_symbol("coreinit.rpl", "MEMFreeToDefaultHeap", True, 1)
        symbol = struct.unpack(">I", symbol.address)[0]
        addr = self.readmem(symbol, 4)
        #print("freemem address: " + hexstr0(struct.unpack(">I", addr)[0]))
        self.call(addr, address) #void, no return

    def memalloc(self, size, align, noprint=False):
        # Allocate from the system heap (OSAllocFromSystem).
        return self.function("coreinit.rpl", "OSAllocFromSystem", noprint, 0, size, align)

    def freealloc(self, address):
        # Free memory previously obtained from memalloc().
        return self.function("coreinit.rpl", "OSFreeToSystem", True, 0, address)

    def createpath(self, path):
        # Write `path` into a reusable 32-byte-aligned console buffer (self.pPath),
        # zeroing the buffer first so old longer paths don't leak through.
        if not hasattr(self, "pPath"): self.pPath = self.memalloc(len(path), 0x20, True) #It'll auto-pad
        size = len(path) + (32 - (len(path) % 32))
        self.function("coreinit.rpl", "memset", True, 0, self.pPath, 0x00, size)
        self.writestr(self.pPath, path)
        #print("pPath address: " + hexstr0(self.pPath))

    def createstr(self, string):
        # Allocate a fresh console buffer and write `string` into it; returns
        # the buffer's console address.
        address = self.memalloc(len(string), 0x20, True) #It'll auto-pad
        size = len(string) + (32 - (len(string) % 32))
        self.function("coreinit.rpl", "memset", True, 0, address, 0x00, size)
        self.writestr(address, string)
        print("String address: " + hexstr0(address))
        return address

    def FSInitClient(self):
        # Allocate and register an FSClient structure (cached in self.pClient).
        self.pClient = self.memalign(0x1700, 0x20)
        self.function("coreinit.rpl", "FSAddClient", True, 0, self.pClient)
        #print("pClient address: " + hexstr0(self.pClient))

    def FSInitCmdBlock(self):
        # Allocate and initialise an FSCmdBlock structure (cached in self.pCmd).
        self.pCmd = self.memalign(0xA80, 0x20)
        self.function("coreinit.rpl", "FSInitCmdBlock", True, 0, self.pCmd)
        #print("pCmd address: " + hexstr0(self.pCmd))

    def FSOpenDir(self, path="/"):
        # Open a directory on the console filesystem; the resulting dir handle
        # is stored in self.pDh for FSReadDir().
        print("Initializing...")
        self.function("coreinit.rpl", "FSInit", True)
        if not hasattr(self, "pClient"): self.FSInitClient()
        if not hasattr(self, "pCmd"): self.FSInitCmdBlock()
        print("Getting memory ready...")
        self.createpath(path)
        self.pDh = self.memalloc(4, 4, True)
        #print("pDh address: " + hexstr0(self.pDh))
        print("Calling function...")
        ret = self.function("coreinit.rpl", "FSOpenDir", False, 0, self.pClient, self.pCmd, self.pPath, self.pDh, 0xFFFFFFFF)
        # Dereference the out-parameter to get the actual handle value.
        self.pDh = int(hexlify(self.readmem(self.pDh, 4)), 16)
        print("Return value: " + hexstr0(ret))

    def SAVEOpenDir(self, path="/", slot=255):
        # Open a directory in save data (nn_save) for the given account slot.
        print("Initializing...")
        self.function("coreinit.rpl", "FSInit", True, 0)
        self.function("nn_save.rpl", "SAVEInit", True, 0, slot)
        print("Getting memory ready...")
        if not hasattr(self, "pClient"): self.FSInitClient()
        if not hasattr(self, "pCmd"): self.FSInitCmdBlock()
        self.createpath(path)
        self.pDh = self.memalloc(4, 4, True)
        #print("pDh address: " + hexstr0(self.pDh))
        print("Calling function...")
        ret = self.function("nn_save.rpl", "SAVEOpenDir", False, 0, self.pClient, self.pCmd, slot, self.pPath, self.pDh, 0xFFFFFFFF)
        self.pDh = int(hexlify(self.readmem(self.pDh, 4)), 16)
        print("Return value: " + hexstr0(ret))

    def FSReadDir(self):
        # Read the next 0x164-byte directory entry from the handle opened by
        # FSOpenDir/SAVEOpenDir and pretty-print it.
        # NOTE(review): relies on getstr()/uint32() and the module-global
        # `printe` scratch string from `common` (imported with *).
        global printe
        if not hasattr(self, "pBuffer"): self.pBuffer = self.memalign(0x164, 0x20)
        #print("pBuffer address: " + hexstr0(self.pBuffer))
        ret = self.function("coreinit.rpl", "FSReadDir", True, 0, self.pClient, self.pCmd, self.pDh, self.pBuffer, 0xFFFFFFFF)
        self.entry = self.readmem(self.pBuffer, 0x164)
        printe = getstr(self.entry, 100) + " "
        self.FileSystem().printflags(uint32(self.entry, 0), self.entry)
        self.FileSystem().printperms(uint32(self.entry, 4))
        print(printe)
        return self.entry, ret

    def SAVEOpenFile(self, path="/", mode="r", slot=255):
        # Open a file in save data.  The actual SAVEOpenFile call is commented
        # out upstream ("may have errors"); this currently only prepares state.
        print("Initializing...")
        self.function("coreinit.rpl", "FSInit", True)
        self.function("nn_save.rpl", "SAVEInit", slot, True)
        print("Getting memory ready...")
        if not hasattr(self, "pClient"): self.FSInitClient()
        if not hasattr(self, "pCmd"): self.FSInitCmdBlock()
        self.createpath(path)
        self.pMode = self.createstr(mode)
        self.pFh = self.memalign(4, 4)
        #print("pFh address: " + hexstr0(self.pFh))
        print("Calling function...")
        print("This function may have errors")
        #ret = self.function("nn_save.rpl", "SAVEOpenFile", self.pClient, self.pCmd, slot, self.pPath, self.pMode, self.pFh, 0xFFFFFFFF)
        #self.pFh = int(self.readmem(self.pFh, 4).encode("hex"), 16)
        #print(ret)

    def FSReadFile(self):
        # Read up to 0x200 bytes from the file handle in self.pFh.
        # NOTE(review): returns via the module-global `tcp`, not self -- confirm.
        if not hasattr(self, "pBuffer"): self.pBuffer = self.memalign(0x200, 0x20)
        print("pBuffer address: " + hexstr0(self.pBuffer))
        ret = self.function("coreinit.rpl", "FSReadFile", False, 0, self.pClient, self.pCmd, self.pBuffer, 1, 0x200, self.pFh, 0, 0xFFFFFFFF)
        print(ret)
        return tcp.readmem(self.pBuffer, 0x200)

    def get_symbol(self, rplname, symname, noprint=False, data=0):
        # Resolve `symname` exported by `rplname` on the console (cmd 0x71).
        # `data`=1 requests a data symbol rather than a function symbol.
        # (The parameter is re-bound below to its packed byte form.)
        self.s.send(b"\x71") #cmd_getsymbol
        request = struct.pack(">II", 8, 8 + len(rplname) + 1) #Pointers
        request += rplname.encode("UTF-8") + b"\x00"
        request += symname.encode("UTF-8") + b"\x00"
        size = struct.pack(">B", len(request))
        data = struct.pack(">B", data)
        self.s.send(size) #Read this many bytes
        self.s.send(request) #Get this symbol
        self.s.send(data) #Is it data?
        address = self.s.recv(4)
        return ExportedSymbol(address, self, rplname, symname, noprint)

    def call(self, address, *args):
        # Call a console function at `address` (4-byte big-endian bytes).
        # Up to 8 args use cmd 0x70; 9-16 args use the "big call" cmd 0x80.
        # Missing args are zero-padded; returns r3 (first reply word).
        arguments = list(args)
        if len(arguments)>8 and len(arguments)<=16: #Use the big call function
            while len(arguments) != 16:
                arguments.append(0)
            self.s.send(b"\x80")
            address = struct.unpack(">I", address)[0]
            request = struct.pack(">I16I", address, *arguments)
            self.s.send(request)
            reply = self.s.recv(8)
            return struct.unpack(">I", reply[:4])[0]
        elif len(arguments) <= 8: #Use the normal one that dNet client uses
            while len(arguments) != 8:
                arguments.append(0)
            self.s.send(b"\x70")
            address = struct.unpack(">I", address)[0]
            request = struct.pack(">I8I", address, *arguments)
            self.s.send(request)
            reply = self.s.recv(8)
            return struct.unpack(">I", reply[:4])[0]
        else: raise BaseException("Too many arguments!")

    #Data last, only a few functions need it, noprint for the big FS/SAVE ones above, acts as gateway for data arg
    def function(self, rplname, symname, noprint=False, data=0, *args):
        # Convenience wrapper: resolve a symbol, then call it with *args.
        symbol = self.get_symbol(rplname, symname, noprint, data)
        ret = self.call(symbol.address, *args)
        return ret

    def validrange(self, address, length):
        # True when [address, address+length) lies inside a known-mapped
        # Wii U memory region (ranges are empirical, some game-dependent).
        if 0x01000000 <= address and address + length <= 0x01800000: return True
        elif 0x0E000000 <= address and address + length <= 0x10000000: return True #Depends on game
        elif 0x10000000 <= address and address + length <= 0x50000000: return True #Doesn't quite go to 5
        elif 0xE0000000 <= address and address + length <= 0xE4000000: return True
        elif 0xE8000000 <= address and address + length <= 0xEA000000: return True
        elif 0xF4000000 <= address and address + length <= 0xF6000000: return True
        elif 0xF6000000 <= address and address + length <= 0xF6800000: return True
        elif 0xF8000000 <= address and address + length <= 0xFB000000: return True
        elif 0xFB000000 <= address and address + length <= 0xFB800000: return True
        elif 0xFFFE0000 <= address and address + length <= 0xFFFFFFFF: return True
        else: return False

    def validaccess(self, address, length, access):
        # True when the requested "read"/"write" access is permitted for the
        # region containing [address, address+length).  Only 0x10000000-
        # 0x50000000 and 0xFFFE0000+ are writable; the rest are read-only.
        # NOTE(review): falls through to None (falsy) for an unknown access
        # string inside a matched range -- callers treat that as denied.
        if 0x01000000 <= address and address + length <= 0x01800000:
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0x0E000000 <= address and address + length <= 0x10000000: #Depends on game, may be EG 0x0E3
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0x10000000 <= address and address + length <= 0x50000000:
            if access.lower() == "read": return True
            if access.lower() == "write": return True
        elif 0xE0000000 <= address and address + length <= 0xE4000000:
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0xE8000000 <= address and address + length <= 0xEA000000:
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0xF4000000 <= address and address + length <= 0xF6000000:
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0xF6000000 <= address and address + length <= 0xF6800000:
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0xF8000000 <= address and address + length <= 0xFB000000:
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0xFB000000 <= address and address + length <= 0xFB800000:
            if access.lower() == "read": return True
            if access.lower() == "write": return False
        elif 0xFFFE0000 <= address and address + length <= 0xFFFFFFFF:
            if access.lower() == "read": return True
            if access.lower() == "write": return True
        else: return False

    class FileSystem: #TODO: Try to clean this up ????
        # Bit-flag definitions and pretty-printers for FS directory entries
        # (the 0x164-byte structures read by FSReadDir above).
        Flags = enum(
            IS_DIRECTORY    = 0x80000000,
            IS_QUOTA        = 0x40000000,
            SPRT_QUOTA_SIZE = 0x20000000, #Supports .quota_size field
            SPRT_ENT_ID     = 0x10000000, #Supports .ent_id field
            SPRT_CTIME      = 0x08000000, #Supports .ctime field
            SPRT_MTIME      = 0x04000000, #Supports .mtime field
            SPRT_ATTRIBUTES = 0x02000000, #Supports .attributes field
            SPRT_ALLOC_SIZE = 0x01000000, #Supports .alloc_size field
            IS_RAW_FILE     = 0x00800000, #Entry isn't encrypted
            SPRT_DIR_SIZE   = 0x00100000, #Supports .size field, doesn't apply to files
            UNSUPPORTED_CHR = 0x00080000) #Entry name has an unsupported character
        Permissions = enum( #Pretty self explanitory
            OWNER_READ  = 0x00004000,
            OWNER_WRITE = 0x00002000,
            OTHER_READ  = 0x00000400,
            OTHER_WRITE = 0x00000200)

        def printflags(self, flags, data):
            # Append a human-readable description of each set flag (and the
            # associated field values from `data`) to the global printe string.
            global printe
            if flags & self.Flags.IS_DIRECTORY: printe += " Directory"
            if flags & self.Flags.IS_QUOTA: printe += " Quota"
            if flags & self.Flags.SPRT_QUOTA_SIZE: printe += " .quota_size: " + hexstr0(uint32(data, 24))
            if flags & self.Flags.SPRT_ENT_ID: printe += " .ent_id: " + hexstr0(uint32(data, 32))
            if flags & self.Flags.SPRT_CTIME: printe += " .ctime: " + hexstr0(uint32(data, 36))
            if flags & self.Flags.SPRT_MTIME: printe += " .mtime: " + hexstr0(uint32(data, 44))
            if flags & self.Flags.SPRT_ATTRIBUTES: pass #weh
            if flags & self.Flags.SPRT_ALLOC_SIZE: printe += " .alloc_size: " + hexstr0(uint32(data, 20))
            if flags & self.Flags.IS_RAW_FILE: printe += " Raw (Unencrypted) file"
            if flags & self.Flags.SPRT_DIR_SIZE: printe += " .dir_size: " + hexstr0(uint64(data, 24))
            if flags & self.Flags.UNSUPPORTED_CHR: printe += " !! UNSUPPORTED CHARACTER IN NAME"

        def printperms(self, perms):
            # Append the set permission bits to the global printe string.
            global printe
            if perms & self.Permissions.OWNER_READ: printe += " OWNER_READ"
            if perms & self.Permissions.OWNER_WRITE: printe += " OWNER_WRITE"
            if perms & self.Permissions.OTHER_READ: printe += " OTHER_READ"
            if perms & self.Permissions.OTHER_WRITE: printe += " OTHER_WRITE"
def hexstr0(data): #0xFFFFFFFF, uppercase hex string
    """Format a non-negative int as "0x" + uppercase hex, zero-padded to 8 digits.

    Values wider than 32 bits keep all their digits (no truncation).
    """
    # format() replaces the fragile hex()/lstrip("0x") trick (lstrip strips
    # *characters*, not a prefix) and the dead Python-2 rstrip("L").
    return "0x" + format(data, "X").zfill(8)
class ExportedSymbol(object):
    """Handle for a console symbol resolved by TCPGecko.get_symbol().

    Stores the 4-byte big-endian address blob plus the owning connection, so
    the symbol can be invoked like a function.
    """

    def __init__(self, address, rpc=None, rplname=None, symname=None, noprint=False):
        self.address = address
        self.rpc = rpc
        self.rplname = rplname
        self.symname = symname
        if noprint: #Make command prompt not explode when using FS or SAVE functions
            return
        print(symname + " address: " + hexstr0(struct.unpack(">I", address)[0]))

    def __call__(self, *args):
        """Call the remote function at this symbol's address with *args."""
        return self.rpc.call(self.address, *args) #Pass in arguments, run address
|
|
from PIL import Image
from PIL import ImageDraw
from PIL import ImageChops
import random
def message_transition(func):
    """Mark `func` as a message-level transition (operates on lists of strings).

    Fix: returns `func` so the marker can also be used as a `@message_transition`
    decorator; the existing call-style usage (`message_transition(f)`) that
    discards the return value is unaffected.
    """
    func.is_message_transition = True
    func.is_display_transition = False
    return func
def display_transition(func):
    """Mark `func` as a display-level transition (operates on PIL images).

    Fix: returns `func` so the marker can also be used as a `@display_transition`
    decorator; the existing call-style usage that discards the return value is
    unaffected.
    """
    func.is_message_transition = False
    func.is_display_transition = True
    return func
def FlashStarsTransition(current_state, desired_state):
    """
    This transition function flashes all asterisks, then blanks, then asterisks, then the desired message.
    :param current_state: the current state of the display - again ignored by this function
    :param desired_state: the desired display state
    :return: a list containing the display states to be passed through
    """
    assert type(current_state) == list
    rows = len(current_state)
    cols = len(current_state[0])

    def uniform_frame(ch):
        # A full-screen frame where every character cell shows `ch`.
        return [ch * cols] * rows

    return [uniform_frame('*'), uniform_frame(' '), uniform_frame('*'), desired_state]
# Register FlashStarsTransition as a message-level (list-of-strings) transition.
message_transition(FlashStarsTransition)
def SimpleTransition(current_state, desired_state):
    """
    The simplest possible transition -- go to the desired state directly with no fancy stuff.
    :param current_state: the current display state -- ignored by this function but included for consistency with other
    transition functions.
    :param desired_state: the desired display state
    :return: in this case, just a single-element list containing the desired state
    """
    # No intermediate frames: jump straight to the target state.
    return [desired_state]
# Register SimpleTransition as a display-level (PIL image) transition.
display_transition(SimpleTransition)
def center_wipe(currentstate, desiredstate):
    """
    Transition function that wipes from currentstate to desiredstate out from the center in both directions.
    :param currentstate: a PIL image object representing the current display state
    :param desiredstate: a PIL image object representing the eventual desired display state
    :return: a list of PIL image objects representing a transition of display states to get from current to desired
    """
    assert isinstance(currentstate, Image.Image)
    assert isinstance(desiredstate, Image.Image)
    assert currentstate.size == desiredstate.size
    # initialize list for transition
    output = []
    width, height = desiredstate.size
    # Fix: use integer column indices.  The original's true division
    # ("size[0] / 2 - 1" and "/ 2 - 0.5") produced float pixel coordinates
    # under Python 3, which ImageDraw only handles by implicit rounding.
    if width % 2 == 0:  # even width: start on the two middle columns
        left_column = width // 2 - 1
        right_column = width // 2
    else:  # odd width: both cursors start on the single middle column
        left_column = width // 2
        right_column = left_column
    # iterate until the wipe has passed the edge
    while left_column >= -1:
        # create a mask with the right amount of interior area transparent
        # note - Image.composite(image1, image2, mask) yields image1 where mask is 1 and image2 where mask is 0
        image_mask = Image.new('1', desiredstate.size, 1)
        ImageDraw.Draw(image_mask).rectangle([left_column, 0, right_column, height - 1], fill=0)
        # composite the initial image with the desired state using the layer mask
        composite = Image.composite(currentstate, desiredstate, image_mask)
        # draw vertical lines of all white to create the line doing the wiping
        draw = ImageDraw.Draw(composite)
        draw.line(xy=[left_column, 0, left_column, height - 1], fill=1, width=1)
        draw.line(xy=[right_column, 0, right_column, height - 1], fill=1, width=1)
        # append this new image to the list of images
        output.append(composite)
        left_column -= 1
        right_column += 1
    # return the list of images for transition
    return output
# Register center_wipe as a display-level (PIL image) transition.
display_transition(center_wipe)
def dissolve_changes_only(currentstate, desiredstate):
    """
    A transition function that changes pixels one by one at random between currentstate and desiredstate. Pixels that
    are the same in both images are skipped (no time taken)
    :param currentstate: a PIL image object representing the current display state
    :param desiredstate: a PIL image object representing the eventual desired display state
    :return: a list of PIL image objects representing a transition of display states to get from current to desired
    """
    assert isinstance(currentstate, Image.Image)
    assert isinstance(desiredstate, Image.Image)
    assert currentstate.size == desiredstate.size
    width, height = currentstate.size
    # Every pixel coordinate in the image, visited in a random order.
    coords = [(col, row) for col in range(width) for row in range(height)]
    random.shuffle(coords)
    frames = []
    work_image = currentstate.copy()
    for coord in coords:
        target_value = desiredstate.getpixel(coord)
        if currentstate.getpixel(coord) == target_value:
            continue  # identical pixels cost no frame
        # Flip this one pixel and snapshot the result as the next frame.
        ImageDraw.Draw(work_image).point(coord, fill=target_value)
        frames.append(work_image.copy())
    return frames
# Register dissolve_changes_only as a display-level (PIL image) transition.
display_transition(dissolve_changes_only)
def push_up(current_state, desired_state):
    """
    A transition function that raises the desired state up from the bottom of the screen, "pushing" the current state
    off the top. One blank line is inserted between.
    :param current_state: a PIL image object representing the current display state
    :param desired_state: a PIL image object representing the eventual desired display state
    :return: a list of PIL image objects representing a transition of display states to get from current to desired
    """
    assert isinstance(current_state, Image.Image)
    assert isinstance(desired_state, Image.Image)
    assert current_state.size == desired_state.size
    height = current_state.size[1]
    output = []
    # Slide both images up one row per frame. On the last step the desired
    # state is pasted at y=0 and fills the whole frame.
    for step in range(height + 1):
        frame = Image.new("1", current_state.size, color=0)
        # current state starts one row above its resting position, leaving the blank separator row
        frame.paste(current_state, (0, -1 - step))
        # desired state starts just below the bottom edge
        frame.paste(desired_state, (0, height - step))
        output.append(frame)
    # FIX: the old code appended desired_state once more here; the final loop
    # frame already equals the desired state, so that produced a duplicated
    # last frame and was inconsistent with push_down/push_right/push_left.
    return output
# Preview the push-up transition (display_transition is defined elsewhere in this file).
display_transition(push_up)
def push_down(current_state, desired_state):
    """
    A transition function that slides the desired state down from the top of the screen, "pushing" the current state
    off the bottom. One blank line is inserted between.
    :param current_state: a PIL image object representing the current display state
    :param desired_state: a PIL image object representing the eventual desired display state
    :return: a list of PIL image objects representing a transition of display states to get from current to desired
    """
    assert isinstance(current_state, Image.Image)
    assert isinstance(desired_state, Image.Image)
    assert current_state.size == desired_state.size
    height = current_state.size[1]
    frames = []
    # Walk both paste offsets down one row per frame until the desired state
    # sits flush with the top edge (offset 0).
    for step in range(height + 1):
        frame = Image.new("1", current_state.size, color=0)
        # current state starts one row below its resting position (blank separator row)
        frame.paste(current_state, (0, 1 + step))
        # desired state starts just above the top edge
        frame.paste(desired_state, (0, step - height))
        frames.append(frame)
    return frames
# Preview the push-down transition (display_transition is defined elsewhere in this file).
display_transition(push_down)
def push_right(current_state, desired_state):
    """
    A transition function that slides the desired state in from the left of the screen, "pushing" the current state
    off the right. One blank column is inserted between.
    :param current_state: a PIL image object representing the current display state
    :param desired_state: a PIL image object representing the eventual desired display state
    :return: a list of PIL image objects representing a transition of display states to get from current to desired
    """
    assert isinstance(current_state, Image.Image)
    assert isinstance(desired_state, Image.Image)
    assert current_state.size == desired_state.size
    width = current_state.size[0]
    frames = []
    # Walk both paste offsets right one column per frame until the desired
    # state sits flush with the left edge (offset 0).
    for step in range(width + 1):
        frame = Image.new("1", current_state.size, color=0)
        # current state starts one column right of its resting position (blank separator column)
        frame.paste(current_state, (1 + step, 0))
        # desired state starts just beyond the left edge
        frame.paste(desired_state, (step - width, 0))
        frames.append(frame)
    return frames
# Preview the push-right transition (display_transition is defined elsewhere in this file).
display_transition(push_right)
def push_left(current_state, desired_state):
    """
    A transition function that slides the desired state in from the right of the screen, "pushing" the current state
    off the left. One blank column is inserted between.
    (Note: the previous docstring was a copy-paste of push_right's and described the wrong direction.)
    :param current_state: a PIL image object representing the current display state
    :param desired_state: a PIL image object representing the eventual desired display state
    :return: a list of PIL image objects representing a transition of display states to get from current to desired
    """
    assert isinstance(current_state, Image.Image)
    assert isinstance(desired_state, Image.Image)
    assert current_state.size == desired_state.size
    width = current_state.size[0]
    frames = []
    # Walk both paste offsets left one column per frame until the desired
    # state sits flush with the left edge (offset 0).
    for step in range(width + 1):
        frame = Image.new("1", current_state.size, color=0)
        # current state starts one column left of its resting position (blank separator column)
        frame.paste(current_state, (-1 - step, 0))
        # desired state starts just beyond the right edge
        frame.paste(desired_state, (width - step, 0))
        frames.append(frame)
    return frames
# Preview the push-left transition (display_transition is defined elsewhere in this file).
display_transition(push_left)
def ellipse_wipe(current_state, desired_state):
    """
    A transition function that draws an ellipse which gradually grows, revealing the desired state inside the ellipse.
    :param current_state: a PIL image object representing the current display state. Must be larger than 4x4.
    :param desired_state: a PIL image object representing the eventual desired display state. Must be larger than 4x4.
    :return: a list of PIL image objects representing a transition of display states to get from current to desired
    """
    assert isinstance(current_state, Image.Image)
    assert isinstance(desired_state, Image.Image)
    assert current_state.size == desired_state.size
    assert current_state.size[0] > 4 and current_state.size[1] > 4  # needs to be larger to make the loop work properly
    # initialize list for transition
    output = []
    # Starting bounds: the degenerate ellipse at the centre of the image.
    if desired_state.size[0] % 2 == 0:  # even number of pixel columns
        # set the right and left columns as the middle ones - assuming the indices start at 0
        left_column = desired_state.size[0] / 2 - 1
        right_column = desired_state.size[0] / 2
    else:  # odd number of pixel columns
        left_column = desired_state.size[0] / 2 - 0.5
        right_column = left_column
    if desired_state.size[1] % 2 == 0:  # even number of pixel rows
        # set the top and bottom rows as the middle ones - assuming the indices start at 0
        top_row = desired_state.size[1] / 2 - 1
        bottom_row = desired_state.size[1] / 2
    else:  # odd number of pixel rows
        top_row = desired_state.size[1] / 2 - 0.5
        bottom_row = top_row
    # Loop until two consecutive frames are identical, i.e. the ellipse has
    # swallowed the whole screen.
    keep_going = True
    while keep_going:
        # mask is opaque (1) outside the ellipse and transparent (0) inside,
        # so the desired state shows through the ellipse interior
        image_mask = Image.new('1', desired_state.size, 1)
        ImageDraw.Draw(image_mask).ellipse([left_column, top_row, right_column, bottom_row], fill=0, outline=0)
        # create a composite image of the desired and current states using the mask
        composite = Image.composite(current_state, desired_state, image_mask)
        # draw the ellipse outline itself - the visible "wipe" edge
        draw = ImageDraw.Draw(composite)
        draw.ellipse([left_column, top_row, right_column, bottom_row], fill=None, outline=1)
        output.append(composite)
        # Grow along the longer axis one pixel per frame and scale the shorter
        # axis proportionally so the ellipse keeps the screen's aspect ratio.
        if current_state.size[0] > current_state.size[1]:  # wider than tall
            left_column -= 1
            right_column += 1
            top_row = min(top_row, int(left_column / current_state.size[0] * current_state.size[1]))
            bottom_row = max(bottom_row, int(right_column / current_state.size[0] * current_state.size[1]))
        else:  # taller than wide (or square, in which case either branch is fine)
            top_row -= 1
            bottom_row += 1
            left_column = min(left_column, int(top_row / current_state.size[1] * current_state.size[0]))
            # BUG FIX: was max(left_column, ...) - the right edge must grow from
            # the current right_column, mirroring bottom_row/top_row above.
            right_column = max(right_column, int(bottom_row / current_state.size[1] * current_state.size[0]))
        # determine whether we are done
        try:
            # if the last two frames are identical, the difference image is all
            # zeros and getbbox() returns None
            keep_going = ImageChops.difference(output[-2], output[-1]).getbbox() is not None
        except IndexError:  # fewer than 2 frames in the output list so far
            keep_going = True
    # drop the final frame, which is known to duplicate the one before it
    return output[:-1]
# Preview the ellipse-wipe transition (display_transition is defined elsewhere in this file).
display_transition(ellipse_wipe)
|
|
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import MalmoPython
import os
import random
import sys
import time
import errno
import Tkinter as tk
#from PIL import Image, ImageTk - we need these for displaying our own gui, but it's a nightmare installing the dependencies correctly.
from array import array
from struct import pack
gameNum = 0  # number of completed goes so far; incremented in sendCommand()
iterations = 5  # default number of goes; overwritten below from the 'goes' argument
root = tk.Tk()
root.wm_title("Video Output")
# Canvas sized to the ALE screen (160x210); shows status text and, optionally, live frames.
canvas = tk.Canvas(root, width=160, height=210, borderwidth=0, highlightthickness=0, bg="white")
canvas.pack()
def callback(event):
    # Mouse click on the canvas gives the window focus and starts the command loop.
    sendCommand() # kick things off
canvas.bind("<Button-1>", callback)
os.system('xset r off') # Nasty, but we need to turn off keyboard auto-repeat to track key states.
# Track which keys are pressed
# Each flag is 1 while the corresponding key is held down; maintained by keyDown/keyUp.
left = 0
right = 0
up = 0
down = 0
fire = 0
def keyUp(event):
    """Key-release handler: clear the held-key flag for whichever key went up.

    Escape quits the application instead of clearing a flag."""
    global left, right, up, down, fire
    key = event.keysym
    if key == 'Escape':
        root.destroy()
    elif key == 'Right':
        right = 0
    elif key == 'Left':
        left = 0
    elif key == 'Up':
        up = 0
    elif key == 'Down':
        down = 0
    elif key == 'space':
        fire = 0
def keyDown(event):
    """Key-press handler: set the held-key flag for the pressed key.

    Opposite directions are mutually exclusive, so pressing one clears
    the other (left/right, up/down)."""
    global left, right, up, down, fire
    key = event.keysym
    if key == 'Right':
        right = 1
        left = 0
    elif key == 'Left':
        left = 1
        right = 0
    elif key == 'Up':
        up = 1
        down = 0
    elif key == 'Down':
        down = 1
        up = 0
    elif key == 'space':
        fire = 1
# ALE op-codes:
# PLAYER_A_NOOP           = 0,
# PLAYER_A_FIRE           = 1,
# PLAYER_A_UP             = 2,
# PLAYER_A_RIGHT          = 3,
# PLAYER_A_LEFT           = 4,
# PLAYER_A_DOWN           = 5,
# PLAYER_A_UPRIGHT        = 6,
# PLAYER_A_UPLEFT         = 7,
# PLAYER_A_DOWNRIGHT      = 8,
# PLAYER_A_DOWNLEFT       = 9,
# PLAYER_A_UPFIRE         = 10,
# PLAYER_A_RIGHTFIRE      = 11,
# PLAYER_A_LEFTFIRE       = 12,
# PLAYER_A_DOWNFIRE       = 13,
# PLAYER_A_UPRIGHTFIRE    = 14,
# PLAYER_A_UPLEFTFIRE     = 15,
# PLAYER_A_DOWNRIGHTFIRE  = 16,
# PLAYER_A_DOWNLEFTFIRE   = 17
# Filtering tables used by sendCommand() to map the current key flags onto a
# single op-code: intersect the sets for pressed keys, subtract those for
# released keys.
allops=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17] # all allowed op-codes
leftops=[4,7,9,12,15,17]        # op-codes with left pressed
rightops=[3,6,8,11,14,16]       # op-codes with right pressed
upops=[2,6,7,10,14,15]          # op-codes with up pressed
downops=[5,8,9,13,16,17]        # op-codes with down pressed
fireops=[1,10,11,12,13,14,15,16,17] # op-codes with fire pressed
def startGame():
    """Pick a fresh recording filename, start the ALE mission, wait for it to
    begin, then prompt the user to click the canvas to take control.

    Relies on module globals: recordingsDirectory, agent_host, my_mission,
    rom_file, want_own_display, gameNum, iterations, canvas."""
    #Find filename for the recording: first saved_dataN.tar.gz that doesn't exist yet
    filenum = 0
    fileRecording = ''
    while fileRecording == '':
        fileRecording = recordingsDirectory+'/saved_data'+str(filenum)+'.tar.gz'
        if os.path.isfile(fileRecording):
            filenum = filenum + 1
            fileRecording = ''
    my_mission_record = MalmoPython.MissionRecordSpec(fileRecording)
    my_mission_record.recordCommands()
    my_mission_record.recordMP4(20, 400000)  # presumably (frames-per-second, bit-rate) - TODO confirm against MissionRecordSpec docs
    my_mission_record.recordRewards()
    my_mission_record.recordObservations()
    try:
        # Suppress Malmo's own frame display when we draw frames ourselves.
        display_gui = 1
        if want_own_display:
            display_gui = 0
        agent_host.startMission( my_mission, MalmoPython.ClientPool(), my_mission_record, display_gui, rom_file )
    except RuntimeError as e:
        print "Error starting mission:",e
        exit(1)
    print "Waiting for the mission to start",
    # Poll until the mission reports it is running, printing a dot per poll.
    world_state = agent_host.getWorldState()
    while not world_state.is_mission_running:
        sys.stdout.write(".")
        time.sleep(0.1)
        world_state = agent_host.getWorldState()
        for error in world_state.errors:
            print "Error:",error.text
    print
    gamestats = "Go " + str(gameNum+1) + " out of " + str(iterations) + "\n"
    canvas.delete("all")
    canvas.create_text(80, 105, text=gamestats + "Click to begin!\nEscape to end") # The window needs keyboard focus or no way to control game.
def sendCommand():
    """Send one ALE op-code derived from the currently held keys, report any
    rewards/errors, optionally draw the latest video frame, and re-schedule
    itself on the tkinter event loop until the mission ends."""
    global gameNum
    ops = set(allops)
    # Narrow down the op-codes by the keys pressed: keep op-codes that include
    # each pressed key, drop op-codes that include each released key.
    if left:
        ops = ops & set(leftops)
    else:
        ops = ops - set(leftops)
    if right:
        ops = ops & set(rightops)
    else:
        ops = ops - set(rightops)
    if up:
        ops = ops & set(upops)
    else:
        ops = ops - set(upops)
    if down:
        ops = ops & set(downops)
    else:
        ops = ops - set(downops)
    if fire:
        ops = ops & set(fireops)
    else:
        ops = ops - set(fireops)
    # After filtering, at most one op-code should remain - presumably exactly
    # one, since the key flags fix every component of the action; TODO confirm.
    if len(ops) > 0:
        agent_host.sendCommand( str(list(ops)[0]) ) # If no keys pressed will send no-op
    # The ALE only updates in response to a command, so get the new world state now.
    world_state = agent_host.getWorldState()
    for reward in world_state.rewards:
        if reward.getValue() > 0:
            print "Summed reward:",reward.getValue()
    for error in world_state.errors:
        print "Error:",error.text
    if world_state.number_of_video_frames_since_last_state > 0 and want_own_display:
        # Turn the frame into an image to display on our canvas.
        # NOTE(review): this branch needs the PIL Image/ImageTk imports that are
        # commented out at the top of this script, and the (320,420) size does
        # not match the 210x160 video requested below - verify before enabling.
        # On my system creating buff was too slow to be usable, whichever of these three apporaches I tried:
        buff = str(bytearray(world_state.video_frames[-1].pixels))
        # Or buff = pack('100800B', *(world_state.video_frames[-1].pixels))
        # Or buff = array('B', world_state.video_frames[-1].pixels)
        image = Image.frombytes('RGB', (320,420), buff)
        photo = ImageTk.PhotoImage(image)
        canvas.delete("all")
        canvas.create_image(80,105, image=photo)
        root.update()
    if world_state.is_mission_running:
        canvas.after(0, sendCommand) # Call sendCommand again as soon as possible within tkinter's event loop.
    else:
        # Mission over: start the next go, or quit once all goes are done.
        gameNum = gameNum + 1
        if gameNum < iterations:
            startGame()
        else:
            root.destroy() # We are done.
root.bind_all('<KeyPress>', keyDown)
root.bind_all('<KeyRelease>', keyUp)
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately (0 = unbuffered; Python 2 only)
agent_host = MalmoPython.ALEAgentHost()
# add some arguments:
agent_host.addOptionalStringArgument('rom_file', 'Path/to/ROM from which to load the game.', '../ALE_ROMS/montezuma_revenge')
agent_host.addOptionalFlag('own_display', 'Display frames direct from Malmo')
agent_host.addOptionalIntArgument('goes', 'Number of goes at the game.', 2)
try:
    agent_host.parse( sys.argv )
except RuntimeError as e:
    print 'ERROR:',e
    print agent_host.getUsage()
    exit(1)
if agent_host.receivedArgument("help"):
    print agent_host.getUsage()
    exit(0)
rom_file = agent_host.getStringArgument('rom_file')
want_own_display = agent_host.receivedArgument('own_display')
iterations = agent_host.getIntArgument('goes')
my_mission = MalmoPython.MissionSpec()
my_mission.requestVideo( 210, 160 )
# Recordings go in a per-ROM directory next to the script, e.g. 'montezuma_revenge_recordings'.
recordingsDirectory = rom_file.rpartition('/')[-1]+'_recordings'
try:
    os.makedirs(recordingsDirectory)
except OSError as exception:
    if exception.errno != errno.EEXIST: #ignore error if already existed
        raise
startGame() # Get things up and ready...
root.mainloop() # and enter the event loop
print "Mission has stopped."
os.system('xset r on') # set auto-repeat back
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
"""Lookup operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_lookup_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
@deprecated(None, "Use `tf.tables_initializer` instead.")
def initialize_all_tables(name="init_all_tables"):
  """Returns an Op that initializes all tables of the default graph.

  Deprecated alias for `tables_initializer`.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes all tables.  Note that if there are
    no tables the returned Op is a NoOp.
  """
  return tables_initializer(name)
def tables_initializer(name="init_all_tables"):
  """Returns an Op that initializes all tables of the default graph.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes all tables.  Note that if there are
    no tables the returned Op is a NoOp.
  """
  initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
  if not initializers:
    # Nothing registered: still hand back an op so callers can always run it.
    return control_flow_ops.no_op(name=name)
  return control_flow_ops.group(*initializers, name=name)
def _check_table_dtypes(table, key_dtype, value_dtype):
"""Check that the given key_dtype and value_dtype matches the table dtypes.
Args:
table: The table to check types against to.
key_dtype: The key data type to check.
value_dtype: The value data type to check.
Raises:
TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data
types.
"""
if key_dtype != table.key_dtype:
raise TypeError("Invalid key dtype, expected %s but got %s." %
(table.key_dtype, key_dtype))
if value_dtype != table.value_dtype:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(table.value_dtype, value_dtype))
class LookupInterface(object):
  """Represent a lookup table that persists across different steps.

  Base class only stores the key/value dtypes and the name; subclasses must
  implement the `init` property and the `size` and `lookup` methods.
  """
  def __init__(self, key_dtype, value_dtype, name):
    """Construct a lookup table interface.

    Args:
      key_dtype: The table key type.
      value_dtype: The table value type.
      name: A name for the operation (optional).
    """
    # Normalize to DType objects so subclasses can rely on dtype semantics.
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
    self._name = name
  @property
  def key_dtype(self):
    """The table key dtype."""
    return self._key_dtype
  @property
  def value_dtype(self):
    """The table value dtype."""
    return self._value_dtype
  @property
  def name(self):
    """The name of the table."""
    return self._name
  @property
  def init(self):
    """The table initialization op."""
    raise NotImplementedError
  def size(self, name=None):
    """Compute the number of elements in this table."""
    raise NotImplementedError
  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values."""
    raise NotImplementedError
class InitializableLookupTableBase(LookupInterface):
  """Initializable lookup table interface.

  An initializable lookup table persists across different steps.
  """
  def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.

    It requires a table initializer object (subclass of `TableInitializerBase`).
    It provides the table key and value types, as well as the op to initialize
    the table. The caller is responsible to execute the initialization op.

    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    # Derive the display name: in graph mode from the last path component of
    # the table op's name; in eager mode from the current context scope name.
    if context.in_graph_mode():
      name = table_ref.op.name.split("/")[-1]
    else:
      name = context.context().scope_name
    super(InitializableLookupTableBase,
          self).__init__(initializer.key_dtype, initializer.value_dtype,
                         name)
    self._table_ref = table_ref
    self._default_value = ops.convert_to_tensor(
        default_value, dtype=self._value_dtype)
    # The default value must be a scalar; merge_with raises if it isn't.
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    self._init = initializer.initialize(self)
  @property
  def table_ref(self):
    """Get the underlying table reference."""
    return self._table_ref
  @property
  def default_value(self):
    """The default value of the table."""
    return self._default_value
  @property
  def init(self):
    """The table initialization op."""
    return self._init
  def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as scope:
      # pylint: disable=protected-access
      return gen_lookup_ops._lookup_table_size_v2(self._table_ref, name=scope)
      # pylint: enable=protected-access
  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values.

    The `default_value` is used for keys not present in the table.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: A name for the operation (optional).

    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` or `default_value` doesn't match the table data
        types.
    """
    # For sparse input, only the (dense) values tensor is looked up; the
    # SparseTensor is rebuilt afterwards with the original indices/shape.
    key_tensor = keys
    if isinstance(keys, sparse_tensor.SparseTensor):
      key_tensor = keys.values
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    with ops.name_scope(name, "%s_Lookup" % self._name,
                        (self._table_ref, key_tensor,
                         self._default_value)) as scope:
      # pylint: disable=protected-access
      values = gen_lookup_ops._lookup_table_find_v2(
          self._table_ref, key_tensor, self._default_value, name=scope)
      # pylint: enable=protected-access
    # The lookup preserves the key tensor's shape.
    values.set_shape(key_tensor.get_shape())
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)
    else:
      return values
class HashTable(InitializableLookupTableBase):
  """A generic hash table implementation.

  Example usage:

  ```python
  table = tf.HashTable(
      tf.KeyValueTensorInitializer(keys, values), -1)
  out = table.lookup(input_tensor)
  table.init.run()
  print(out.eval())
  ```
  """
  def __init__(self, initializer, default_value, shared_name=None, name=None):
    """Creates a non-initialized `HashTable` object.

    Creates a table, the type of its keys and values are specified by the
    initializer.
    Before using the table you will have to initialize it. After initialization
    the table will be immutable.

    Args:
      initializer: The table initializer to use. See `HashTable` kernel for
        supported key and value types.
      default_value: The value to use if a key is missing in the table.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).

    Returns:
      A `HashTable` object.
    """
    with ops.name_scope(name, "hash_table", (initializer,
                                             default_value)) as scope:
      # Create the (still empty) table resource; the base class wires up the
      # initializer op that will populate it.
      # pylint: disable=protected-access
      table_ref = gen_lookup_ops._hash_table_v2(
          shared_name=shared_name,
          key_dtype=initializer.key_dtype,
          value_dtype=initializer.value_dtype,
          name=scope)
      # pylint: enable=protected-access
      super(HashTable, self).__init__(table_ref, default_value, initializer)
class TableInitializerBase(object):
  """Base class for lookup table initializers.

  Subclasses must implement `initialize`, which returns the op that populates
  a given table.
  """
  def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.

    Args:
      key_dtype: Type of the table keys.
      value_dtype: Type of the table values.
    """
    # Normalize to DType objects up front.
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
  @property
  def key_dtype(self):
    """The expected table key dtype."""
    return self._key_dtype
  @property
  def value_dtype(self):
    """The expected table value dtype."""
    return self._value_dtype
  def initialize(self, table):
    """Returns the table initialization op."""
    raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
  """Table initializers given `keys` and `values` tensors."""
  def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
    """Constructs a table initializer object based on keys and values tensors.

    Args:
      keys: The tensor for the keys.
      values: The tensor for the values.
      key_dtype: The `keys` data type. Used when `keys` is a python array.
      value_dtype: The `values` data type. Used when `values` is a python array.
      name: A name for the operation (optional).
    """
    with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
      self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
      self._values = ops.convert_to_tensor(
          values, dtype=value_dtype, name="values")
      self._name = scope
    # The initializer's dtypes are inferred from the converted tensors.
    super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
                                                    self._values.dtype)
  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.table_ref, self._keys,
                            self._values)) as scope:
      # pylint: disable=protected-access
      init_op = gen_lookup_ops._initialize_table_v2(
          table.table_ref, self._keys, self._values, name=scope)
      # pylint: enable=protected-access
    # Register so `tf.tables_initializer()` picks this op up.
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
class TextFileIndex(object):
  """Sentinel column indices understood by `TextFileInitializer`.

  `WHOLE_LINE` selects the entire line content (expects dtype string);
  `LINE_NUMBER` selects the zero-based line number (expects dtype int64).
  """
  WHOLE_LINE = -2
  LINE_NUMBER = -1
class TextFileInitializer(TableInitializerBase):
  """Table initializers from a text file.

  This initializer assigns one entry in the table for each line in the file.
  The key and value type of the table to initialize is given by `key_dtype` and
  `value_dtype`.
  The key and value content to get from each line is specified by
  the `key_index` and `value_index`.

  * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
    expects data type int64.
  * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
    type string.
  * A value `>=0` means use the index (starting at zero) of the split line based
    on `delimiter`.

  For example if we have a file with the following content:

  ```
  emerson 10
  lake 20
  palmer 30
  ```

  The following snippet initializes a table with the first column as keys and
  second column as values:

  * `emerson -> 10`
  * `lake -> 20`
  * `palmer -> 30`

  ```python
  table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
      "test.txt", tf.string, 0, tf.int64, 1, delimiter=" "), -1)
  ...
  table.init.run()
  ```

  Similarly to initialize the whole line as keys and the line number as values.

  * `emerson 10 -> 0`
  * `lake 20 -> 1`
  * `palmer 30 -> 2`

  ```python
  table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
      "test.txt", tf.string, tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
      tf.int64, tf.contrib.lookup.TextFileIndex.LINE_NUMBER, delimiter=" "), -1)
  ...
  table.init.run()
  ```
  """
  def __init__(self,
               filename,
               key_dtype,
               key_index,
               value_dtype,
               value_index,
               vocab_size=None,
               delimiter="\t",
               name=None):
    """Constructs a table initializer object to populate from a text file.

    It generates one key-value pair per line. The type of table key and
    value are specified by `key_dtype` and `value_dtype`, respectively.
    Similarly the content of the key and value are specified by the key_index
    and value_index.

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line based
      on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_dtype: The `key` data type.
      key_index: the index that represents information of a line to get the
        table 'key' values from.
      value_dtype: The `value` data type.
      value_index: the index that represents information of a line to get the
        table 'value' values from.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: A name for the operation (optional).

    Raises:
      ValueError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    if not isinstance(filename, ops.Tensor) and not filename:
      raise ValueError("Filename required for %s." % name)
    key_dtype = dtypes.as_dtype(key_dtype)
    value_dtype = dtypes.as_dtype(value_dtype)
    # Validate the key column selector against the declared key dtype.
    if key_index < -2:
      raise ValueError("Invalid key index %s." % (key_index))
    if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
      raise ValueError("Signature mismatch. Keys must be dtype %s, got %s." %
                       (dtypes.int64, key_dtype))
    if ((key_index == TextFileIndex.WHOLE_LINE) and
        (not key_dtype.is_integer) and (key_dtype != dtypes.string)):
      raise ValueError(
          "Signature mismatch. Keys must be integer or string, got %s." %
          key_dtype)
    # Validate the value column selector against the declared value dtype.
    if value_index < -2:
      raise ValueError("Invalid value index %s." % (value_index))
    if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
      raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
                       (dtypes.int64, value_dtype))
    if value_index == TextFileIndex.WHOLE_LINE and value_dtype != dtypes.string:
      raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
                       (dtypes.string, value_dtype))
    if (vocab_size is not None) and (vocab_size <= 0):
      raise ValueError("Invalid vocab_size %s." % vocab_size)
    self._filename = filename
    self._key_index = key_index
    self._value_index = value_index
    self._vocab_size = vocab_size
    self._delimiter = delimiter
    self._name = name
    super(TextFileInitializer, self).__init__(key_dtype, value_dtype)
  def initialize(self, table):
    """Initializes the table from a text file.

    Args:
      table: The table to be initialized.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    _check_table_dtypes(table, self.key_dtype, self.value_dtype)
    with ops.name_scope(self._name, "text_file_init",
                        (table.table_ref,)) as scope:
      filename = ops.convert_to_tensor(
          self._filename, dtypes.string, name="asset_filepath")
      # -1 is the kernel's sentinel for "vocab size unknown".
      # pylint: disable=protected-access
      init_op = gen_lookup_ops._initialize_table_from_text_file_v2(
          table.table_ref,
          filename,
          self._key_index,
          self._value_index,
          -1 if self._vocab_size is None else self._vocab_size,
          self._delimiter,
          name=scope)
      # pylint: enable=protected-access
    # Register so `tf.tables_initializer()` picks this op up.
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    # If the filename tensor is anything other than a string constant (e.g., if
    # it is a placeholder) then it does not make sense to track it as an asset.
    if constant_op.is_constant(filename):
      ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
    return init_op
class TextFileStringTableInitializer(TextFileInitializer):
  """Table initializer for `int64` IDs to string tables from a text file."""

  def __init__(self,
               filename,
               key_column_index=TextFileIndex.LINE_NUMBER,
               value_column_index=TextFileIndex.WHOLE_LINE,
               vocab_size=None,
               delimiter="\t",
               name="text_file_string_table_init"):
    """Constructs an initializer for an id-to-string table from a text file.

    It populates a table that its key and value types are int64 and string,
    respectively. It generates one key-value pair per line.
    The content of the key and value are specified by `key_column_index`
    and `value_column_index`.

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line based
      on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the keys
        from. The default is `TextFileIndex.LINE_NUMBER`, i.e. the zero-based
        line number.
      value_column_index: The column index from the text file to get the
        values from. The default is `TextFileIndex.WHOLE_LINE`, i.e. the whole
        line content.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.

    Raises:
      TypeError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    super(TextFileStringTableInitializer, self).__init__(
        filename,
        dtypes.int64,
        key_column_index,
        dtypes.string,
        value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class TextFileIdTableInitializer(TextFileInitializer):
  """Table initializer for string to `int64` IDs tables from a text file."""

  def __init__(self,
               filename,
               key_column_index=TextFileIndex.WHOLE_LINE,
               value_column_index=TextFileIndex.LINE_NUMBER,
               vocab_size=None,
               delimiter="\t",
               name="text_file_id_table_init",
               key_dtype=dtypes.string):
    """Constructs an initializer for a string-to-id table from a text file.

    It populates a table that its key and value types are string and int64,
    respectively. It generates one key-value pair per line.
    The content of the key and value are specified by the key_index
    and value_index.

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line based
      on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the `key`
        values from. The default is `TextFileIndex.WHOLE_LINE`, i.e. the whole
        line content.
      value_column_index: The column index from the text file to get the
        `value` values from. The default is `TextFileIndex.LINE_NUMBER`, i.e.
        the zero-based line number.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.
      key_dtype: The `key` data type.

    Raises:
      TypeError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    super(TextFileIdTableInitializer, self).__init__(
        filename,
        key_dtype,
        key_column_index,
        dtypes.int64,
        value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class HasherSpec(collections.namedtuple("HasherSpec", ("hasher", "key"))):
  """Specification of the hashing function used for hash buckets.

  `hasher` names the hashing function (eg. "fasthash", "stronghash").
  `key` optionally supplies a key for keyed hash functions; currently only
  the strong hash makes use of it.

  Fields:
    hasher: The hasher name to use.
    key: The key to be used by the hashing function, if required.
  """

  __slots__ = ()
FastHashSpec = HasherSpec("fasthash", None) # pylint: disable=invalid-name
class StrongHashSpec(HasherSpec):
  """A structure to specify a key of the strong keyed hash spec.

  The strong hash requires a `key`, which is a list of 2 unsigned integer
  numbers. These should be non-zero; random numbers generated from random.org
  would be a fine choice.

  Fields:
    key: The key to be used by the keyed hashing function.
  """
  __slots__ = ()

  def __new__(cls, key):
    # Validate eagerly: the strong-hash kernel requires exactly two unsigned
    # integer key components.
    if len(key) != 2:
      raise ValueError("key must have size 2, got %s." % len(key))
    if not isinstance(key[0], compat.integral_types) or not isinstance(
        key[1], compat.integral_types):
      raise TypeError("Invalid key %s. Must be unsigned integer values." % key)
    # BUG FIX: the original called `super(cls, StrongHashSpec)`, with the
    # arguments reversed.  That only works when `cls is StrongHashSpec` and
    # raises TypeError for any subclass; the conventional order works for both.
    return super(StrongHashSpec, cls).__new__(cls, "stronghash", key)
def _as_string(tensor):
  """Return `tensor` unchanged if it already holds strings, else cast it."""
  is_string = tensor.dtype.base_dtype == dtypes.string
  return tensor if is_string else string_ops.as_string(tensor)
class IdTableWithHashBuckets(LookupInterface):
  """String to Id table wrapper that assigns out-of-vocabulary keys to buckets.

  For example, if an instance of `IdTableWithHashBuckets` is initialized with a
  string-to-id table that maps:
  - emerson -> 0
  - lake -> 1
  - palmer -> 2
  The `IdTableWithHashBuckets` object will perform the following mapping:
  - emerson -> 0
  - lake -> 1
  - palmer -> 2
  - <other term> -> bucket id between 3 and 3 + num_oov_buckets - 1, calculated
    by: hash(<term>) % num_oov_buckets + vocab_size

  If input_tensor is ["emerson", "lake", "palmer", "king", "crimson"],
  the lookup result is [0, 1, 2, 4, 7]

  If `table` is None, only out-of-vocabulary buckets are used.

  Example usage:

  ```python
  num_oov_buckets = 3
  input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimnson"])
  table = tf.IdTableWithHashBuckets(
      tf.HashTable(tf.TextFileIdTableInitializer(filename), default_value),
      num_oov_buckets)
  out = table.lookup(input_tensor)
  table.init.run()
  print(out.eval())
  ```

  The hash function used for generating out-of-vocabulary buckets ID is handled
  by `hasher_spec`.
  """

  def __init__(self,
               table,
               num_oov_buckets,
               hasher_spec=FastHashSpec,
               name=None,
               key_dtype=None):
    """Construct a `IdTableWithHashBuckets` object.

    Args:
      table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
      num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
      hasher_spec: A `HasherSpec` to specify the hash function to use for
        assignation of out-of-vocabulary buckets (optional).
      name: A name for the operation (optional).
      key_dtype: Data type of keys passed to `lookup`. Defaults to
        `table.key_dtype` if `table` is specified, otherwise `tf.string`.
        Must be string or integer, and must be castable to `table.key_dtype`.

    Raises:
      ValueError: when `table` in None and `num_oov_buckets` is not positive.
      TypeError: when `hasher_spec` is invalid.
    """
    # If a name ends with a '/' it is a "name scope", remove all trailing '/'
    # characters to use as table name.
    if name:
      name = name.rstrip("/")
    if table:
      if key_dtype is None:
        key_dtype = table.key_dtype
      supported_table_key_dtypes = (dtypes.int64, dtypes.string)
      if table.key_dtype not in supported_table_key_dtypes:
        # BUG FIX: report the dtype that actually failed the check
        # (`table.key_dtype`); the original message printed the requested
        # `key_dtype`, which can be a different, perfectly valid value.
        raise TypeError("Invalid key dtype, expected one of %s, but got %s." %
                        (supported_table_key_dtypes, table.key_dtype))
      if table.key_dtype.is_integer != key_dtype.is_integer:
        raise TypeError("Invalid key dtype, expected %s but got %s." %
                        ("integer" if key_dtype.is_integer else "non-integer",
                         table.key_dtype))
      if table.value_dtype != dtypes.int64:
        raise TypeError("Invalid value dtype, expected %s but got %s." %
                        (dtypes.int64, table.value_dtype))
      self._table = table
      name = name or self._table.name
    else:
      if num_oov_buckets <= 0:
        raise ValueError("oov_buckets must be > 0 if no table is supplied.")
      key_dtype = dtypes.string if key_dtype is None else key_dtype
      self._table = None
      name = name or "hash_bucket"
    if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
      raise TypeError(
          "Invalid key_dtype, expected integer or string, got %s." % key_dtype)
    self._num_oov_buckets = num_oov_buckets
    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError(
          "hasher_spec must be of type HasherSpec, got %s" % hasher_spec)
    self._hasher_spec = hasher_spec
    super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64,
                                                 name.split("/")[-1])

  @property
  def init(self):
    """The table initialization op."""
    if self._table:
      return self._table.init
    # No underlying vocabulary table: nothing to initialize.
    with ops.name_scope(None, "init"):
      return control_flow_ops.no_op()

  def size(self, name=None):
    """Compute the number of elements in this table."""
    with ops.name_scope(name, "%s_Size" % self.name) as scope:
      if self._table:
        tsize = self._table.size(scope)
      else:
        tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
      return tsize + self._num_oov_buckets

  def _get_string_to_hash_bucket_fn(self, hasher_spec):
    """Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError("hasher_spec must be of type HasherSpec %s" % hasher_spec)
    if hasher_spec.hasher == "fasthash":
      return string_ops.string_to_hash_bucket_fast
    if hasher_spec.hasher == "legacy":
      return string_ops.string_to_hash_bucket
    if hasher_spec.hasher == "stronghash":
      return functools.partial(
          string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
    raise ValueError("Unknown hasher %s" % hasher_spec.hasher)

  def lookup(self, keys, name=None):
    """Looks up `keys` in the table, outputs the corresponding values.

    It assigns out-of-vocabulary keys to buckets based in their hashes.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: Optional name for the op.

    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` doesn't match the table key data type.
    """
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    values = keys
    if isinstance(keys, sparse_tensor.SparseTensor):
      values = keys.values
    if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
      values = math_ops.to_int64(values)
    if self._num_oov_buckets == 0:
      ids = self._table.lookup(values, name=name)
    else:
      # TODO(yleon): Consider moving this functionality to its own kernel.
      with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
        str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
            self._hasher_spec)
        buckets = str_to_hash_bucket(
            _as_string(values),
            num_buckets=self._num_oov_buckets,
            name="hash_bucket")
        if self._table:
          # Shift bucket ids past the vocabulary, then use them only where the
          # table returned its default (i.e. out-of-vocabulary) value.
          ids = self._table.lookup(values)
          buckets = math_ops.add(buckets, self._table.size())
          is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
          ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
        else:
          ids = buckets
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
    return ids
def index_table_from_file(vocabulary_file=None,
                          num_oov_buckets=0,
                          vocab_size=None,
                          default_value=-1,
                          hasher_spec=FastHashSpec,
                          key_dtype=dtypes.string,
                          name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the key and the zero-based line
  number is the ID.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  table = tf.contrib.lookup.index_table_from_file(
      vocabulary_file="test.txt", num_oov_buckets=1)
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()
  ids.eval()  ==> [0, 1, 3, 2]  # where 3 is the out-of-vocabulary bucket
  ```

  Args:
    vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignation of out-of-vocabulary buckets.
    key_dtype: The `key` data type.
    name: A name for this op (optional).

  Returns:
    The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_file` is not set.
    ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
      than zero.
  """
  # Reject a missing or empty string filename early; a tensor-valued filename
  # is allowed here and validated when the table is initialized.
  if vocabulary_file is None or (
      isinstance(vocabulary_file, str) and not vocabulary_file):
    raise ValueError("vocabulary_file must be specified and must not be empty.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if vocab_size is not None and vocab_size < 1:
    vocab_file_value = vocabulary_file
    if isinstance(vocabulary_file, ops.Tensor):
      # Try to resolve a constant filename for a clearer error message.
      vocab_file_value = tensor_util.constant_value(vocabulary_file) or "?"
    raise ValueError("vocab_size must be greater than 0, got %d. "
                     "vocabulary_file: %s" % (vocab_size, vocab_file_value))
  if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")
  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    table = None
    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      if vocab_size:
        # Keep the shared_name:
        # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                  TextFileIndex.WHOLE_LINE,
                                                  TextFileIndex.LINE_NUMBER)
      else:
        # Keep the shared_name
        # <table_type>_<filename>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
                                               TextFileIndex.WHOLE_LINE,
                                               TextFileIndex.LINE_NUMBER)
      init = TextFileIdTableInitializer(
          vocabulary_file,
          vocab_size=vocab_size,
          # Integer keys are always read from the file as int64.
          key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
          name="table_init")
      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    if num_oov_buckets:
      # Wrap the vocabulary table so that OOV keys hash into
      # [vocab size, vocab size + num_oov_buckets - 1].
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=key_dtype)
    return table
def index_table_from_tensor(vocabulary_list,
                            num_oov_buckets=0,
                            default_value=-1,
                            hasher_spec=FastHashSpec,
                            dtype=dtypes.string,
                            name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a string `vocabulary_list` 1-D
  tensor where each element is a key and corresponding index within the tensor
  is the value.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `mapping` cannot have duplicates, otherwise when executing the
  table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  table = tf.contrib.lookup.index_table_from_tensor(
      mapping=vocabulary_list, num_oov_buckets=1, default_value=-1)
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()
  ids.eval()  ==> [0, 1, 4, 2]
  ```

  Args:
    vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to
      indices. The type of this object must be castable to `dtype`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignment of out-of-vocabulary buckets.
    dtype: The type of values passed to `lookup`. Only string and integers are
      supported.
    name: A name for this op (optional).

  Returns:
    The lookup table to map an input `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `mapping` is invalid.
    ValueError: If `num_oov_buckets` is negative.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")
  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    keys = ops.convert_to_tensor(vocabulary_list)
    if keys.dtype.is_integer != dtype.is_integer:
      raise ValueError("Expected %s, got %s." %
                       ("integer"
                        if dtype.is_integer else "non-integer", keys.dtype))
    if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
      raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
    num_elements = array_ops.size(keys)
    # Values are the zero-based positions of each key in `vocabulary_list`.
    values = math_ops.to_int64(math_ops.range(num_elements))
    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      # Integer keys are stored as int64 in the underlying table.
      table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
      init = KeyValueTensorInitializer(
          table_keys,
          values,
          table_keys.dtype.base_dtype,
          dtypes.int64,
          name="table_init")
      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    if num_oov_buckets:
      # Route out-of-vocabulary keys into
      # [num_elements, num_elements + num_oov_buckets - 1].
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=dtype)
    return table
def index_to_string_table_from_file(vocabulary_file,
                                    vocab_size=None,
                                    default_value="UNK",
                                    name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The table is initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the value and the
  zero-based line number is the index.

  Any input which does not have a corresponding index in the vocabulary file
  (an out-of-vocabulary entry) is assigned the `default_value`

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_file(
      vocabulary_file="test.txt", default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_file: The vocabulary filename.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table to map a string values associated to a given index `int64`
    `Tensors`.

  Raises:
    ValueError: when `vocabulary_file` is empty.
    ValueError: when `vocab_size` is invalid.
  """
  if not vocabulary_file:
    raise ValueError("vocabulary_file must be specified.")
  if vocab_size is not None and vocab_size < 1:
    raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
  with ops.name_scope(name, "index_to_string") as scope:
    shared_name = ""
    if vocab_size:
      # Keep a shared_name
      # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                TextFileIndex.LINE_NUMBER,
                                                TextFileIndex.WHOLE_LINE)
    else:
      # Keep a shared_name <table_type>_<filename>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
                                             TextFileIndex.LINE_NUMBER,
                                             TextFileIndex.WHOLE_LINE)
    init = TextFileStringTableInitializer(
        vocabulary_file, vocab_size=vocab_size, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
def index_to_string_table_from_tensor(vocabulary_list,
                                      default_value="UNK",
                                      name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
  each element is a value and the corresponding index within the tensor is the
  key.

  Any input which does not have a corresponding index in 'mapping'
  (an out-of-vocabulary entry) is assigned the `default_value`

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `mapping` cannot have duplicates, otherwise when executing the
  table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_tensor(
      vocabulary_list, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
      from indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table to map a string values associated to a given index `int64`
    `Tensors`.

  Raises:
    ValueError: when `vocabulary_list` is not set.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  with ops.name_scope(name, "index_to_string") as scope:
    vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
    num_elements = array_ops.size(vocabulary_list)
    # Keys are the zero-based positions of each vocabulary entry.
    keys = math_ops.to_int64(math_ops.range(num_elements))
    shared_name = ""
    init = KeyValueTensorInitializer(
        keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
# Lookup table kernels are stateful table manipulations with no meaningful
# gradient; register every op (and its V2 variant) as not differentiable.
ops.NotDifferentiable("LookupTableFind")
ops.NotDifferentiable("LookupTableFindV2")
ops.NotDifferentiable("LookupTableInsert")
ops.NotDifferentiable("LookupTableInsertV2")
ops.NotDifferentiable("LookupTableSize")
ops.NotDifferentiable("LookupTableSizeV2")
ops.NotDifferentiable("HashTable")
ops.NotDifferentiable("HashTableV2")
ops.NotDifferentiable("InitializeTable")
ops.NotDifferentiable("InitializeTableV2")
ops.NotDifferentiable("InitializeTableFromTextFile")
ops.NotDifferentiable("InitializeTableFromTextFileV2")
ops.NotDifferentiable("MutableDenseHashTable")
ops.NotDifferentiable("MutableDenseHashTableV2")
ops.NotDifferentiable("MutableHashTable")
ops.NotDifferentiable("MutableHashTableV2")
ops.NotDifferentiable("MutableHashTableOfTensors")
ops.NotDifferentiable("MutableHashTableOfTensorsV2")
|
|
import random
import numpy.random as nr
import processes as proc
import molecules as mol
from processes import Process
from molecules import BioMoleculeCount
from molecules import BioMolecule
'''
Classes with objects (DNA, helicase, polymerase) and a process (replication).
Parent classes (e.g. BioMolecule, BioMoleculeCount) provide the shared,
inherited behaviour.
'''
class DNA(BioMolecule):
    """A DNA molecule with a length and a nucleotide count.

    NOTE(review): the stored length is half of the constructor argument --
    presumably one replication direction covers half the strand; confirm.
    """

    def __init__(self, mid, name, length, nucleotides, mass=0):
        super(DNA, self).__init__(mid, name, mass)
        self._length = length / 2
        self._nucleotides = nucleotides

    @property
    def length(self):
        return self._length

    @length.setter
    def length(self, value):
        self._length = value

    @property
    def nucleotides(self):
        return self._nucleotides

    @nucleotides.setter
    def nucleotides(self, value):
        self._nucleotides = value
class PolymeraseIII(BioMoleculeCount):
    """DNA polymerase III.

    Tracks its current position on the strand (int) and whether it is
    currently bound to the DNA (bool).
    """

    def __init__(self, mid, name, count=0, position=0, bound=False):
        super(PolymeraseIII, self).__init__(mid, name, count)
        self._position = position
        self._bound = bound

    @property
    def position(self):
        return self._position

    @position.setter
    def position(self, value):
        self._position = value

    @property
    def bound(self):
        return self._bound

    @bound.setter
    def bound(self, value):
        self._bound = value
class Helicase(BioMoleculeCount):
    """Helicase enzyme that unwinds the DNA double strand.

    Tracks its current position on the strand (int) and whether it is
    currently bound to the DNA (bool).
    """

    def __init__(self, mid, name, count=0, position=0, bound=False):
        super(Helicase, self).__init__(mid, name, count)
        self._position = position
        self._bound = bound

    @property
    def position(self):
        return self._position

    @position.setter
    def position(self, value):
        self._position = value

    @property
    def bound(self):
        return self._bound

    @bound.setter
    def bound(self, value):
        self._bound = value
class Replication(Process):
    """Replicates the DNA from the origin in two directions at once.

    Two helicase/polymerase pairs work from the origin: one anticlockwise
    ("_anticlock") and one clockwise ("_clock").  The helicase unwinds the
    strand and consumes ATP; the polymerase follows and consumes nucleotides
    (two per base, one for each daughter strand).

    NOTE(review): this module appears to have been written for Python 2 --
    the '/' divisions below rely on integer division.  Under Python 3 the
    positions become floats; confirm before porting.
    """

    def __init__(self, id, name, ATP, NT, double=False):
        super(Replication, self).__init__(id, name)
        self._double = double       # True once the whole DNA has been copied
        self.ATP_molecules = ATP    # ATP pool consumed by the helicases
        self.Nucleotide = NT        # nucleotide pool consumed by polymerases

    @property
    def double(self):
        return self._double

    @double.setter
    def double(self, value):
        self._double = value

    def update(self, model):
        """Advance both replication forks by one simulation step."""
        # Fetch the molecule objects; replication proceeds from the origin
        # in two directions (anticlockwise and clockwise).
        self.PolymeraseIII_anticlock = model.states['PolymeraseIII_0']
        self.PolymeraseIII_clock = model.states['PolymeraseIII_1']
        self.Helicase_anticlock = model.states['Helicase_0']
        self.Helicase_clock = model.states['Helicase_1']
        self.DNA = model.states['DNA']
        # Replication may only run while the DNA has not been duplicated yet.
        if self.double == False:
            # Anticlockwise direction.
            if (self.PolymeraseIII_anticlock.bound == False) and (self.PolymeraseIII_anticlock.position < self.DNA.length):
                self.Helicase_anticlock, self.PolymeraseIII_anticlock = self.initiate(DNA, self.PolymeraseIII_anticlock, self.Helicase_anticlock)
            elif self.Helicase_anticlock.bound == True:
                self.Helicase_anticlock, self.PolymeraseIII_anticlock = self.elongate(DNA, self.PolymeraseIII_anticlock, self.Helicase_anticlock)
            else:
                self.Helicase_anticlock, self.PolymeraseIII_anticlock = self.terminate(DNA, self.PolymeraseIII_anticlock, self.Helicase_anticlock)
            # Clockwise direction.
            # BUG FIX: the original tested the *anticlockwise* polymerase
            # position here, making clockwise initiation depend on the other
            # fork's progress; test the clockwise polymerase instead.
            if (self.PolymeraseIII_clock.bound == False) and (self.PolymeraseIII_clock.position < self.DNA.length):
                self.Helicase_clock, self.PolymeraseIII_clock = self.initiate(DNA, self.PolymeraseIII_clock, self.Helicase_clock)
            elif self.Helicase_clock.bound == True:
                self.Helicase_clock, self.PolymeraseIII_clock = self.elongate(DNA, self.PolymeraseIII_clock, self.Helicase_clock)
            else:
                self.Helicase_clock, self.PolymeraseIII_clock = self.terminate(DNA, self.PolymeraseIII_clock, self.Helicase_clock)
            # Once both directions have completed, block further replication.
            if (self.PolymeraseIII_anticlock.position >= self.DNA.length) and (self.PolymeraseIII_clock.position >= self.DNA.length):
                self.double = True
        # Total nucleotide count: the template plus what both forks copied.
        self.DNA.nucleotides = 4*self.DNA.length + 2*self.PolymeraseIII_anticlock.position + 2*self.PolymeraseIII_clock.position

    def initiate(self, DNA, Pol, Hel):
        """Bind the enzymes and start strand separation.

        Called while the polymerase is not yet bound.  The helicase binds
        with a fixed probability and starts unwinding the strand (consuming
        ATP).  Once the helicase-polymerase gap has reached the minimum
        distance, the polymerase binds with its own probability.
        (The DNA parameter is unused; self.DNA is used instead.)
        """
        Helicase = Hel
        PolymeraseIII = Pol
        if Helicase.bound == False:
            x_number = nr.randint(1, 10)  # helicase binding probability
            if x_number == 1:
                Helicase.bound = True
        elif self.ATP_molecules >= 100 and (Helicase.position - PolymeraseIII.position) < 3000:
            # Enough ATP: full helicase step of 100.
            Helicase.position += 100
            self.ATP_molecules -= 100
        elif self.ATP_molecules > 0 and (Helicase.position - PolymeraseIII.position) < 3000:
            # ATP-limited: advance by whatever ATP remains.
            Helicase.position += self.ATP_molecules
            self.ATP_molecules -= self.ATP_molecules
        if Helicase.position >= 1500:  # minimum helicase-polymerase distance
            y_number = nr.randint(1, 5)  # polymerase binding probability
            if y_number == 1:
                PolymeraseIII.bound = True
        if Helicase.position > self.DNA.length:
            # Refund the overshoot ATP and clamp at the DNA middle.
            self.ATP_molecules = self.ATP_molecules + (Helicase.position - self.DNA.length)
            Helicase.position = self.DNA.length
        return Helicase, PolymeraseIII

    def elongate(self, DNA, Pol, Hel):
        """Advance both enzymes while helicase and polymerase are bound.

        Each step moves up to 100 positions, limited by the available ATP
        and nucleotides.  The helicase-polymerase gap is kept between 1500
        and 3000.  (The DNA parameter is unused; self.DNA is used instead.)
        """
        Helicase = Hel
        PolymeraseIII = Pol
        if self.ATP_molecules >= 100 and (Helicase.position - PolymeraseIII.position) < 3000:
            # Enough ATP and the gap is small enough: full helicase step.
            Helicase.position += 100
            self.ATP_molecules -= 100
            if self.Nucleotide >= 200 and (Helicase.position - PolymeraseIII.position) > 1500:
                # Enough nucleotides (>= 200): full polymerase step.
                PolymeraseIII.position += 100
                self.Nucleotide -= 200
            elif self.Nucleotide > 1 and (Helicase.position - PolymeraseIII.position) > 1500:
                # Nucleotide-limited (1-199): advance as far as possible and
                # refund the helicase the ATP it cannot use this step.
                PolymeraseIII.position += self.Nucleotide/2
                Helicase.position = Helicase.position - 100 + self.Nucleotide/2
                self.ATP_molecules = self.ATP_molecules + 100 - self.Nucleotide/2
                self.Nucleotide -= 2*(self.Nucleotide/2)
        elif self.ATP_molecules >= 0 and (Helicase.position - PolymeraseIII.position) < 3000:
            # ATP-limited: the helicase advances by whatever ATP remains.
            Helicase.position += self.ATP_molecules
            if self.Nucleotide >= 200 and (Helicase.position - PolymeraseIII.position) > 1500:
                PolymeraseIII.position += 100
                self.Nucleotide -= 200
            elif self.Nucleotide > 1 and (Helicase.position - PolymeraseIII.position) > 1500:
                PolymeraseIII.position += self.Nucleotide/2
                Helicase.position = Helicase.position - self.ATP_molecules + self.Nucleotide/2
                self.ATP_molecules -= self.Nucleotide/2
                self.Nucleotide -= 2*(self.Nucleotide/2)
            self.ATP_molecules -= self.ATP_molecules
        if Helicase.position > self.DNA.length:
            # Refund the overshoot ATP and clamp at the DNA middle.
            self.ATP_molecules = self.ATP_molecules + (Helicase.position - self.DNA.length)
            Helicase.position = self.DNA.length
        if Helicase.position >= self.DNA.length:
            # The helicase has reached the middle of the DNA: it falls off.
            Helicase.bound = False
        return Helicase, PolymeraseIII

    def terminate(self, DNA, Pol, Hel):
        """Finish replication after the helicase has detached.

        While nucleotides last, the polymerase keeps advancing by up to 100
        positions per step.  When it reaches the middle of the DNA it is
        released and surplus nucleotides are refunded.
        (The DNA parameter is unused; self.DNA is used instead.)
        """
        Helicase = Hel
        PolymeraseIII = Pol
        if PolymeraseIII.bound == True:
            if self.Nucleotide >= 200:
                PolymeraseIII.position += 100
                self.Nucleotide -= 200
            elif self.Nucleotide > 1:
                PolymeraseIII.position += self.Nucleotide/2
                self.Nucleotide -= 2*self.Nucleotide/2
            if PolymeraseIII.position >= self.DNA.length:
                # Refund the overshoot (two nucleotides per base) and release
                # the polymerase at the middle of the DNA.
                self.Nucleotide += (PolymeraseIII.position - self.DNA.length)*2
                PolymeraseIII.position = self.DNA.length
                PolymeraseIII.bound = False
        return Helicase, PolymeraseIII

    def gene_check(self, DNA, Pol_ac, Pol_c, gene_begin, gene_end):
        """Report whether a gene has already been replicated.

        Args:
            DNA: unused; kept for signature compatibility (self.DNA is used).
            Pol_ac: the anticlockwise polymerase.
            Pol_c: the clockwise polymerase.
            gene_begin: start coordinate of the gene on the strand.
            gene_end: end coordinate of the gene on the strand.

        Returns:
            2 if the gene has already been replicated, otherwise 1.
        """
        PolymeraseIII_ac = Pol_ac
        PolymeraseIII_c = Pol_c
        if (gene_end < PolymeraseIII_c.position) or (gene_begin > (2*self.DNA.length - PolymeraseIII_ac.position)):
            return 2
        else:
            return 1
|
|
#!/usr/bin/env python3
import re
import sys
import json
import requests
from lxml import etree
from pymods import OAIReader
from bs4 import BeautifulSoup
sys.path.append('../assets')
import assets
# Thumbnail-service descriptor handed to assets.thumbnail_service().
tn = {'name': 'sobek', 'prefix': 'http://dpanther.fiu.edu/sobek/content'}
# XML namespace map used when querying harvested records; values are kept in
# Clark notation ('{uri}') so they can be prefixed directly onto tag names.
nameSpace_default = { None: '{http://www.loc.gov/mods/v3}',
                     'oai_dc': '{http://www.openarchives.org/OAI/2.0/oai_dc/}',
                     'dc': '{http://purl.org/dc/elements/1.1/}',
                     'mods': '{http://www.loc.gov/mods/v3}',
                     'dcterms': '{http://purl.org/dc/terms/}',
                     'xlink': '{http://www.w3.org/1999/xlink}',
                     'repox': '{http://repox.ist.utl.pt}',
                     'oai_qdc': '{http://worldcat.org/xmlschemas/qdc-1.0/}'}
# Provider labels written into every output document.
PROVIDER = 'FSU'
dprovide = 'FSU'
# Shorthand for the Dublin Core namespace used throughout the loop below.
dc = nameSpace_default['dc']
# When True, each record's OAI identifier is echoed to stdout.
VERBOSE = True
def write_json_ld(docs):
    """Serialize the harvested documents to the test-data JSON file."""
    serialized = json.dumps(docs, indent=2)
    with open('testData/fiu-repoxfull.json', 'w') as jsonOutput:
        jsonOutput.write(serialized)
# Parse the OAI-PMH dump and map every record to a DPLA MAP-style document.
with open('test_data/fiu_bzs-1.xml', encoding='utf-8') as data_in:
    records = OAIReader(data_in)
docs = []
for record in records:
    # NOTE(review): records carrying a 'deleted' attribute are skipped entirely,
    # even when its value is 'false' (the else pairs with the *outer* if) --
    # confirm that is intended.
    if 'deleted' in record.attrib.keys():
        if record.attrib['deleted'] == 'true':
            pass
    else:
        oai_id = record.oai_urn
        if VERBOSE:
            print(oai_id)
        # logging.debug(oai_id)
        sourceResource = {}
        # sourceResource.alternative
        # sourceResource.collection
        # sourceResource.contributor
        if record.metadata.get_element('.//{0}contributor'.format(dc)):
            sourceResource['contributor'] = [{"name": name}
                                             for name in
                                             record.metadata.get_element(
                                                 './/{0}contributor'.format(dc),
                                                 delimiter=';')]
        # sourceResource.creator
        if record.metadata.get_element('.//{0}creator'.format(dc)):
            sourceResource['creator'] = []
            for name in record.metadata.get_element('.//{0}creator'.format(dc),
                                                    delimiter=';'):
                # need to test for ( Contributor ) and ( contributor )
                # NOTE(review): rstrip("( Contributor )") strips a *character
                # set*, not the suffix -- it can eat trailing letters from the
                # name itself; str.removesuffix would be the safe form.
                if (len(name) > 0) and ("ontributor )" not in name):
                    sourceResource['creator'].append({"name": name.strip(" ")})
                elif "ontributor )" in name:
                    if 'contributor' not in sourceResource.keys():
                        sourceResource['contributor'] = []
                        sourceResource['contributor'].append({"name": name.strip(
                            " ").rstrip("( Contributor )").rstrip(
                            "( contributor )")})
                    else:
                        sourceResource['contributor'].append(
                            {"name": name.strip(" ").rstrip(
                                "( Contributor )").rstrip("( contributor )")})
        # sourceResource.date
        date = record.metadata.get_element('.//{0}date'.format(dc))
        if date:
            sourceResource['date'] = {"begin": date[0], "end": date[0]}
        # sourceResource.description
        if record.metadata.get_element('.//{0}description'.format(dc)):
            sourceResource['description'] = record.metadata.get_element(
                './/{0}description'.format(dc), delimiter=';')
        # sourceResource.extent
        # sourceResource.format
        if record.metadata.get_element('.//{0}format'.format(dc)):
            sourceResource['format'] = record.metadata.get_element(
                './/{0}format'.format(dc))
        # sourceResource.genre
        # sourceResource.identifier
        dPantherPURL = re.compile('dpService/dpPurlService/purl')
        identifier = record.metadata.get_element('.//{0}identifier'.format(dc))
        # NOTE(review): PURL_match is only rebound when a search succeeds, so a
        # record without a PURL silently reuses the previous record's value
        # (or raises NameError on the very first record) -- verify.
        try:
            for ID in identifier:
                PURL = dPantherPURL.search(ID)
                try:
                    PURL_match = PURL.string
                except AttributeError as err:
                    # logging.warning(
                    #     'sourceResource.identifier: {0} - {1}'.format(err,
                    #                                                   oai_id))
                    print(err, oai_id)
                    pass
            sourceResource['identifier'] = PURL_match
        except TypeError as err:
            # logging.warning(
            #     'sourceResource.identifier: {0} - {1}'.format(err,
            #                                                   oai_id))
            print(err, oai_id)
            pass
        # if identifier is not None and len(identifier) > 1:
        #     sourceResource['identifier'] = []
        #     for ID in identifier:
        #         try:
        #             PURL = dPantherPURL.search(ID)
        #             if PURL:
        #                 PURL_match = PURL.string
        #             else:
        #                 sourceResource['identifier'].append(ID)
        #         except TypeError as err:
        #             # logging.warning(
        #             #     'sourceResource.identifier: {0} - {1}'.format(err,
        #             #                                                   oai_id))
        #             print(err, oai_id)
        #             pass
        # else:
        #     sourceResource['identifier'] = identifier
        # sourceResource.language
        if record.metadata.get_element('.//{0}language'.format(dc)):
            sourceResource['language'] = []
            for element in record.metadata.get_element(
                    './/{0}language'.format(dc), delimiter=';'):
                # anything longer than a 3-letter code is treated as a name
                if len(element) > 3:
                    sourceResource['language'].append({"name": element})
                else:
                    sourceResource['language'].append({"iso_639_3": element})
        # sourceResource.place : sourceResource['spatial']
        if record.metadata.get_element('.//{0}coverage'.format(dc)):
            sourceResource['spatial'] = [{'name': place}
                                         for place in
                                         record.metadata.get_element(
                                             './/{0}coverage'.format(dc))]
        # sourceResource.publisher
        if record.metadata.get_element('.//{0}publisher'.format(dc)):
            sourceResource['publisher'] = record.metadata.get_element(
                './/{0}publisher'.format(dc))
        # sourceResource.relation
        # sourceResource.isReplacedBy
        # sourceResource.replaces
        # sourceResource.rights -- records without rights are dropped
        rights = record.metadata.get_element('.//{0}rights'.format(dc))
        if rights:
            sourceResource['rights'] = [{'text': rights[0]}]
        else:
            # logging.warning('No sourceResource.rights - {0}'.format(oai_id))
            continue
        # sourceResource.subject
        if record.metadata.get_element('.//{0}subject'.format(dc)):
            sourceResource['subject'] = []
            for term in record.metadata.get_element('.//{0}subject'.format(dc),
                                                    delimiter=';'):
                # NOTE(review): non-raw pattern relies on '\(' passing through;
                # a raw string r"\( lcsh \)$" avoids the invalid-escape warning.
                term = re.sub("\( lcsh \)$", '', term)
                if len(term) > 0:
                    sourceResource['subject'].append({"name": term.strip(" ")})
        # sourceResource.title -- records without a title are dropped
        title = record.metadata.get_element('.//{0}title'.format(dc))
        if title:
            sourceResource['title'] = title
        else:
            # logging.warning('No sourceResource.rights - {0}'.format(oai_id))
            print('Rights', oai_id)
            continue
        # sourceResource.type
        if record.metadata.get_element('.//{0}type'.format(dc)):
            sourceResource['type'] = record.metadata.get_element(
                './/{0}type'.format(dc), delimiter=';')
        # webResource.fileFormat
        # aggregation.dataProvider
        data_provider = dprovide
        # aggregation.intermediateProvider
        # aggregation.isShownAt
        # aggregation.preview
        # NOTE(review): at module level an unassigned PURL_match raises
        # NameError, not UnboundLocalError -- this handler may never fire.
        try:
            preview = assets.thumbnail_service(PURL_match, tn)
        except UnboundLocalError as err:
            # logging.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
            print(err, oai_id)
            continue
        # aggregation.provider
        try:
            docs.append({"@context": "http://api.dp.la/items/context",
                         "sourceResource": sourceResource,
                         "aggregatedCHO": "#sourceResource",
                         "dataProvider": data_provider,
                         "isShownAt": PURL_match,
                         "preview": preview,
                         "provider": PROVIDER})
        except NameError as err:
            # logging.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
            print(err, oai_id)
            pass
#write_json_ld(docs)  # write test
print(json.dumps(docs, indent=2))  # dump test
|
|
import ply.yacc as yacc
from exception import ParseException
# necessary even though not explicitly used
from lexer import tokens
# The parser returns a dictionary describing the contents of the line.
# line_no: int
# label: string for a label on the line
# directive: dictionary {
# align: int
# address: int
# double: [double...]
# float: [float...]
# space: int
# string: [string...]
# word: [int...]
# }
# instruction: dictionary {
# opcode: string
# rd: int
# rs1: int
# rs2: int
# immediate: int
# label: string
# }
# string values for the above
# Dictionary keys used in the parse-result structure described above.
line_no = "line_no"
label = "label"
directive = "directive"
# keys inside the "directive" sub-dictionary
d_align = "align"
d_address = "address"
d_double = "double"
d_float = "float"
d_space = "space"
d_string = "string"
d_word = "word"
# keys inside the "instruction" sub-dictionary
instruction = "instruction"
i_opcode = "opcode"
i_rd = "rd"
i_rs1 = "rs1"
i_rs2 = "rs2"
i_immediate = "immediate"
i_label = label
# default addresses used when .text / .data omit an explicit address
default_text_addr = 0
default_data_addr = 0x200
# Each function below describes a rule (or rules) in the grammar of the dlx
# syntax. Grammar rules are defined in the docstring of the function.
# Lower case labels are non-terminals.
# Upper case labels are tokens from the lexer (terminals).
# Many functions define invalid rule combinations in order to catch errors,
# because it seems that the parser doesn't always signal errors when it can't
# match all the tokens in its stack.
def p_line(p):
    """line : label statement comment"""
    # Every parsed line records its line number; label and statement
    # contents are merged in when present.
    p[0] = {line_no: p.lineno(0)}
    if p[1]:
        p[0][label] = p[1]
    if p[2]:
        # was: dict(p[0].items() + p[2].items()) -- Python-2-only, since
        # dict views cannot be concatenated in Python 3. update() merges
        # the statement dictionary with identical results.
        p[0].update(p[2])
def p_label(p):
    """label : NAME ':'
             |
    """
    # `len(p) is 3` compared object identity, not value -- it only worked
    # because CPython caches small ints. `==` is the correct comparison.
    if len(p) == 3:
        p[0] = p[1]
    else:
        p[0] = None
def p_statement_directive(p):
    """statement : directive"""
    # Tag the directive dictionary under the "directive" result key.
    p[0] = {directive: p[1]}
def p_statement_instruction(p):
    """statement : instruction"""
    # Tag the instruction dictionary under the "instruction" result key.
    p[0] = {instruction: p[1]}
def p_statement_empty(p):
    """statement :"""
    # An empty statement contributes nothing to the line dictionary.
    p[0] = None
def p_comment(p):
    """comment : COMMENT
              |
    """
    # Comments are recognized but carry no semantic value.
    p[0] = None
def p_directive_align(p):
    """directive : d_ALIGN unsigned
                 | d_ALIGN
    """
    # Value comparison (was `is 3`, which relied on CPython's small-int cache).
    if len(p) == 3:
        p[0] = {d_align: p[2]}
    else:
        raise ParseException(
            "ERROR line {0}: .align requires 1 integer parameter.".format(
                p.lineno(1)
            )
        )
def p_directive_asciiz(p):
    """directive : d_ASCIIZ stringlist
                 | d_ASCIIZ
    """
    # Value comparison (was `is 3`, which relied on CPython's small-int cache).
    if len(p) == 3:
        p[0] = {d_string: p[2]}
    else:
        raise ParseException(
            "ERROR line {0}: .asciiz requires a list of strings.".format(
                p.lineno(1)
            )
        )
def p_directive_data(p):
    """directive : d_DATA int
                 | d_DATA
    """
    # Value comparison (was `is 3`). A bare .data falls back to the
    # default data-segment address.
    if len(p) == 3:
        p[0] = {d_address: p[2]}
    else:
        p[0] = {d_address: default_data_addr}
def p_directive_double(p):
    """directive : d_DOUBLE numlist
                 | d_DOUBLE
    """
    # Value comparison (was `is 3`, which relied on CPython's small-int cache).
    if len(p) == 3:
        # normalize every operand to float
        p[2] = [float(x) for x in p[2]]
        p[0] = {d_double: p[2]}
    else:
        raise ParseException(
            "ERROR line {0}: .double requires double parameters".format(
                p.lineno(1)
            )
        )
def p_directive_float(p):
    """directive : d_FLOAT numlist
                 | d_FLOAT
    """
    # Value comparison (was `is 3`, which relied on CPython's small-int cache).
    if len(p) == 3:
        # normalize every operand to float
        p[2] = [float(x) for x in p[2]]
        p[0] = {d_float: p[2]}
    else:
        raise ParseException(
            "ERROR line {0}: .float requires float parameters".format(
                p.lineno(1)
            )
        )
def p_directive_text(p):
    """directive : d_TEXT unsigned
                 | d_TEXT
    """
    # Value comparison (was `is 3`). A bare .text falls back to the
    # default text-segment address.
    if len(p) == 3:
        p[0] = {d_address: p[2]}
    else:
        p[0] = {d_address: default_text_addr}
def p_directive_space(p):
    """directive : d_SPACE unsigned
                 | d_SPACE
    """
    # Value comparison (was `is 3`, which relied on CPython's small-int cache).
    if len(p) == 3:
        p[0] = {d_space: p[2]}
    else:
        raise ParseException(
            "ERROR line {0}: .space requires 1 integer parameters".format(
                p.lineno(1)
            )
        )
def p_directive_word(p):
    """directive : d_WORD numlist
                 | d_WORD
    """
    # Value comparison (was `is 3`, which relied on CPython's small-int cache).
    if len(p) == 3:
        lst = []
        for word in p[2]:
            # every operand must be an integer; floats are rejected
            if not isinstance(word, int):
                raise ParseException(
                    "ERROR line {0}: .word requires integer parameters".format(
                        p.lineno(1)
                    )
                )
            else:
                lst.append(word)
        p[0] = {d_word: lst}
    else:
        raise ParseException(
            "ERROR line {0}: .word requires integer parameters".format(
                p.lineno(1)
            )
        )
def p_directive_unknown(p):
    """directive : DIRECTIVE numlist
                 | DIRECTIVE stringlist
                 | DIRECTIVE
    """
    # Any directive token without a dedicated rule is an error, regardless
    # of what argument list followed it.
    message = "ERROR line {0}: unknown directive {1}".format(
        p.lineno(1),
        p[1]
    )
    raise ParseException(message)
def p_instruction_none(p):
    """instruction : i_NONE"""
    # Zero-operand instructions carry only their opcode.
    p[0] = {i_opcode: p[1]}
def p_instruction_name(p):
    """instruction : i_NAME unsigned
                   | i_NAME NAME
                   | i_NAME
                   | i_GPR GPR
                   | i_GPR
                   | i_NUM unsigned
                   | i_NUM
    """
    # Value comparison (was `is 3`, which relied on CPython's small-int cache).
    if len(p) == 3:
        p[0] = {i_opcode: p[1]}
        # label
        if p.slice[2].type == "NAME":
            p[0][i_label] = p[2]
        # gpr
        elif p.slice[2].type == "GPR":
            p[0][i_rs1] = p[2]
        # number
        else:
            p[0][i_immediate] = p[2]
    else:
        raise ParseException(
            "ERROR line {0}: missing parameter for {1}".format(
                p.lineno(0),
                p[1]
            )
        )
def p_instruction_gpr_arg(p):
    """instruction : i_GPR_NAME GPR ',' NAME
                   | i_GPR_NAME GPR ','
                   | i_GPR_NAME GPR
                   | i_GPR_NAME
                   | i_GPR_UINT GPR ',' NAME
                   | i_GPR_UINT GPR ',' unsigned
                   | i_GPR_UINT GPR ','
                   | i_GPR_UINT GPR
                   | i_GPR_UINT
    """
    # Value comparison (was `is 5`, which relied on CPython's small-int cache).
    if len(p) == 5:
        p[0] = {i_opcode: p[1]}
        # these branch instructions use rs1
        if p.slice[1].type == "i_GPR_NAME":
            p[0][i_rs1] = p[2]
        # but lhi uses rd...
        else:
            p[0][i_rd] = p[2]
        # label
        if p.slice[4].type == "NAME":
            p[0][i_label] = p[4]
        # number
        else:
            p[0][i_immediate] = p[4]
    else:
        raise ParseException(
            "ERROR line {0}: missing parameter for {1}".format(
                p.lineno(0),
                p[1]
            )
        )
def p_instruction_2reg(p):
    """instruction : i_GPR_FPR GPR ',' FPR
                   | i_GPR_FPR GPR ','
                   | i_GPR_FPR GPR
                   | i_GPR_FPR
                   | i_2DPR dpr ',' dpr
                   | i_2DPR dpr ','
                   | i_2DPR dpr
                   | i_2DPR
                   | i_2FPR FPR ',' FPR
                   | i_2FPR FPR ','
                   | i_2FPR FPR
                   | i_2FPR
                   | i_FPR_GPR FPR ',' GPR
                   | i_FPR_GPR FPR ','
                   | i_FPR_GPR FPR
                   | i_FPR_GPR
                   | i_FPR_DPR FPR ',' dpr
                   | i_FPR_DPR FPR ','
                   | i_FPR_DPR FPR
                   | i_FPR_DPR
                   | i_DPR_FPR dpr ',' FPR
                   | i_DPR_FPR dpr ','
                   | i_DPR_FPR dpr
                   | i_DPR_FPR
    """
    # Value comparison (was `is 5`, which relied on CPython's small-int cache).
    if len(p) == 5:
        p[0] = {
            i_opcode: p[1],
            i_rd: p[2],
            i_rs1: p[4]
        }
    else:
        raise ParseException(
            "ERROR line {0}: missing parameter for {1}".format(
                p.lineno(0),
                p[1]
            )
        )
def p_instruction_2gpr_num(p):
    """instruction : i_2GPR_INT GPR ',' GPR ',' NAME
                   | i_2GPR_INT GPR ',' GPR ',' int
                   | i_2GPR_INT GPR ',' GPR ','
                   | i_2GPR_INT GPR ',' GPR
                   | i_2GPR_INT GPR ','
                   | i_2GPR_INT GPR
                   | i_2GPR_INT
                   | i_2GPR_UINT GPR ',' GPR ',' NAME
                   | i_2GPR_UINT GPR ',' GPR ',' unsigned
                   | i_2GPR_UINT GPR ',' GPR ','
                   | i_2GPR_UINT GPR ',' GPR
                   | i_2GPR_UINT GPR ','
                   | i_2GPR_UINT GPR
                   | i_2GPR_UINT
    """
    # Value comparison (was `is 7`, which relied on CPython's small-int cache).
    if len(p) == 7:
        p[0] = {
            i_opcode: p[1],
            i_rd: p[2],
            i_rs1: p[4]
        }
        # label
        if p.slice[6].type == "NAME":
            p[0][i_label] = p[6]
        # num
        else:
            p[0][i_immediate] = p[6]
    else:
        raise ParseException(
            "ERROR line {0}: missing parameter for {1}".format(
                p.lineno(0),
                p[1]
            )
        )
def p_instruction_3reg(p):
    """instruction : i_3GPR GPR ',' GPR ',' GPR
                   | i_3GPR GPR ',' GPR ','
                   | i_3GPR GPR ',' GPR
                   | i_3GPR GPR ','
                   | i_3GPR GPR
                   | i_3GPR
                   | i_3DPR dpr ',' dpr ',' dpr
                   | i_3DPR dpr ',' dpr ','
                   | i_3DPR dpr ',' dpr
                   | i_3DPR dpr ','
                   | i_3DPR dpr
                   | i_3DPR
                   | i_3FPR FPR ',' FPR ',' FPR
                   | i_3FPR FPR ',' FPR ','
                   | i_3FPR FPR ',' FPR
                   | i_3FPR FPR ','
                   | i_3FPR FPR
                   | i_3FPR
    """
    # Value comparison (was `is 7`, which relied on CPython's small-int cache).
    if len(p) == 7:
        p[0] = {
            i_opcode: p[1],
            i_rd: p[2],
            i_rs1: p[4],
            i_rs2: p[6]
        }
    else:
        raise ParseException(
            "ERROR line {0}: missing parameter for {1}".format(
                p.lineno(0),
                p[1]
            )
        )
def p_instruction_reg_offset(p):
    """instruction : i_GPR_OFFSET GPR ',' offset
                   | i_GPR_OFFSET GPR ','
                   | i_GPR_OFFSET GPR
                   | i_GPR_OFFSET
                   | i_DPR_OFFSET dpr ',' offset
                   | i_DPR_OFFSET dpr ','
                   | i_DPR_OFFSET dpr
                   | i_DPR_OFFSET
                   | i_FPR_OFFSET FPR ',' offset
                   | i_FPR_OFFSET FPR ','
                   | i_FPR_OFFSET FPR
                   | i_FPR_OFFSET
    """
    # Value comparison (was `is 5`, which relied on CPython's small-int cache).
    if len(p) == 5:
        # start from the offset dictionary and attach opcode/destination
        p[0] = p[4]
        p[0][i_opcode] = p[1]
        p[0][i_rd] = p[2]
    else:
        raise ParseException(
            "ERROR line {0}: missing parameter for {1}".format(
                p.lineno(0),
                p[1]
            )
        )
def p_instruction_offset_reg(p):
    """instruction : i_OFFSET_GPR offset ',' GPR
                   | i_OFFSET_GPR offset ','
                   | i_OFFSET_GPR offset
                   | i_OFFSET_GPR
                   | i_OFFSET_DPR offset ',' dpr
                   | i_OFFSET_DPR offset ','
                   | i_OFFSET_DPR offset
                   | i_OFFSET_DPR
                   | i_OFFSET_FPR offset ',' FPR
                   | i_OFFSET_FPR offset ','
                   | i_OFFSET_FPR offset
                   | i_OFFSET_FPR
    """
    # Value comparison (was `is 5`, which relied on CPython's small-int cache).
    if len(p) == 5:
        # start from the offset dictionary and attach opcode/destination
        p[0] = p[2]
        p[0][i_opcode] = p[1]
        p[0][i_rd] = p[4]
    else:
        raise ParseException(
            "ERROR line {0}: missing parameter for {1}".format(
                p.lineno(0),
                p[1]
            )
        )
def p_dpr(p):
    """dpr : FPR"""
    # Double-precision values occupy even-numbered FPR pairs (f0..f30).
    register_number = int(p[1][1:])
    if register_number in range(0, 31, 2):
        p[0] = p[1]
    else:
        raise ParseException(
            "ERROR line {0}: register {1} invalid, even number register "
            "required".format(
                p.lineno(1),
                p[1]
            )
        )
def p_offset(p):
    """offset : int '(' GPR ')'
              | NAME
    """
    # Value comparison (was `is 5`, which relied on CPython's small-int cache).
    if len(p) == 5:
        # immediate(base-register) addressing form
        p[0] = {
            i_immediate: p[1],
            i_rs1: p[3]
        }
    else:
        # bare label form
        p[0] = {i_label: p[1]}
def p_unsigned(p):
    """unsigned : NUMBER"""
    if not isinstance(p[1], int):
        raise ParseException(
            "ERROR line {0}: expected int, found float".format(
                p.lineno(1)
            )
        )
    if p[1] < 0:
        raise ParseException(
            "ERROR line {0}: unsigned int required".format(
                p.lineno(1)
            )
        )
    # validate that it will fit in 16 bits
    # (was `is not 0`: identity comparison on a computed int is
    # implementation-defined; `!= 0` is the value comparison intended)
    if (p[1] & ~0xffff) != 0:
        print("WARNING line {0}: unsigned immediate larger than 16 bits".format(
            p.lineno(1)
        ))
    p[0] = p[1]
def p_int(p):
    """int : NUMBER"""
    if not isinstance(p[1], int):
        raise ParseException(
            "ERROR line {0}: expected int, found float".format(
                p.lineno(1)
            )
        )
    # NOTE(review): a strictly 16-bit *signed* range would be
    # [-2**15, 2**15 - 1]; the wider bounds here appear deliberate
    # (allowing unsigned-style encodings) -- confirm before tightening.
    imm_max = int(2**16) - 1
    imm_min = -int(2**16)
    if p[1] > imm_max or p[1] < imm_min:
        # print() form parses under both Python 2 and 3
        print("WARNING line {0}: signed immediate larger than 16 bits".format(
            p.lineno(1)
        ))
    p[0] = p[1]
def p_numlist(p):
    """numlist : numlist ',' NUMBER
               | numlist ','
               | NUMBER
    """
    # Value comparisons (were `is 4` / `is 3`, relying on the small-int cache).
    if len(p) == 4:
        # grow the existing list with the new number
        p[1].append(p[3])
        p[0] = p[1]
    elif len(p) == 3:
        # trailing comma without a following number
        raise ParseException(
            "ERROR line {0}: incomplete list".format(
                p.lineno(1)
            )
        )
    else:
        # single number starts a fresh list
        p[0] = [p[1]]
def p_stringlist(p):
    """stringlist : stringlist ',' STRING
                  | stringlist ','
                  | STRING
    """
    # Value comparisons (were `is 4` / `is 3`, relying on the small-int cache).
    if len(p) == 4:
        # grow the existing list with the new string
        p[1].append(p[3])
        p[0] = p[1]
    elif len(p) == 3:
        # trailing comma without a following string
        raise ParseException(
            "ERROR line {0}: incomplete list".format(
                p.lineno(1)
            )
        )
    else:
        # single string starts a fresh list
        p[0] = [p[1]]
# Handle errors signaled from the parser.
def p_error(p):
if p is not None:
# line counter isn't auto incremented due to exception
p.lexer.lineno += 1
raise ParseException(
"ERROR line {0}: unknown token type {1} value \"{2}\"".format(
p.lineno,
p.type,
p.value
)
)
# build the parser
# (runs at import time: yacc.yacc() generates or loads the LALR tables
# from the p_* rule functions defined above)
parser = yacc.yacc()
|
|
"""Supporting functions for polydata and grid objects."""
import os
import collections.abc
import enum
import logging
import signal
import sys
from threading import Thread
import threading
import traceback
import numpy as np
from pyvista import _vtk
import pyvista
from .fileio import from_meshio
from . import transformations
class FieldAssociation(enum.Enum):
    """Represents which type of vtk field a scalar or vector array is associated with."""

    # Values mirror vtkDataObject's FIELD_ASSOCIATION_* constants so members
    # can be handed straight to VTK APIs expecting those integers.
    POINT = _vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
    CELL = _vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
    NONE = _vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
    ROW = _vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
    """Look up the VTK type id for a given Python data type.

    Corrects for a string type mapping issue.

    Returns
    -------
    int : the integer type id specified in vtkType.h
    """
    vtk_type = _vtk.get_vtk_array_type(typ)
    # Work around a silly string-type bug: id 3 must be reported as 13.
    return 13 if vtk_type == 3 else vtk_type
def vtk_bit_array_to_char(vtkarr_bint):
    """Cast a vtk bit array to a char array."""
    char_array = _vtk.vtkCharArray()
    # DeepCopy performs the bit -> char widening for us.
    char_array.DeepCopy(vtkarr_bint)
    return char_array
def vtk_id_list_to_array(vtk_id_list):
    """Convert a vtkIdList to a NumPy array."""
    n_ids = vtk_id_list.GetNumberOfIds()
    ids = [vtk_id_list.GetId(i) for i in range(n_ids)]
    return np.array(ids)
def convert_string_array(arr, name=None):
    """Convert a numpy array of strings to a vtkStringArray or vice versa.

    Note that this is terribly inefficient - inefficient support
    is better than no support :). If you have ideas on how to make this faster,
    please consider opening a pull request.
    """
    if isinstance(arr, np.ndarray):
        # numpy -> vtk: string arrays have no zero-copy path, so insert
        # the values one at a time.
        vtk_string_arr = _vtk.vtkStringArray()
        for value in arr:
            vtk_string_arr.InsertNextValue(value)
        if isinstance(name, str):
            vtk_string_arr.SetName(name)
        return vtk_string_arr
    # vtk -> numpy: pull the values back out one at a time.
    values = [arr.GetValue(i) for i in range(arr.GetNumberOfValues())]
    return np.array(values, dtype='|U')
def convert_array(arr, name=None, deep=0, array_type=None):
    """Convert a NumPy array to a vtkDataArray or vice versa.

    Parameters
    -----------
    arr : ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
    name : str
        The name of the data array for VTK.
    deep : bool
        If input is a numpy array, deep copy the values.
    array_type : int, optional
        VTK array type id to force for the output (see vtkType.h).

    Returns
    -------
    vtkDataArray, ndarray, or None
        The converted array: ``vtkDataArray`` when the input is a NumPy
        ``ndarray``, NumPy ``ndarray`` when the input is a VTK array,
        ``None`` when the input is ``None``.
    """
    if arr is None:
        return
    if isinstance(arr, np.ndarray):
        # object arrays cannot cross into VTK; coerce them to byte strings
        if arr.dtype is np.dtype('O'):
            arr = arr.astype('|S')
        arr = np.ascontiguousarray(arr)
        if arr.dtype.type in (np.str_, np.bytes_):
            # This handles strings
            vtk_data = convert_string_array(arr)
        else:
            # This will handle numerical data
            arr = np.ascontiguousarray(arr)
            vtk_data = _vtk.numpy_to_vtk(num_array=arr, deep=deep,
                                         array_type=array_type)
        if isinstance(name, str):
            vtk_data.SetName(name)
        return vtk_data
    # Otherwise input must be a vtkDataArray
    if not isinstance(arr, (_vtk.vtkDataArray, _vtk.vtkBitArray, _vtk.vtkStringArray)):
        raise TypeError(f'Invalid input array type ({type(arr)}).')
    # Handle booleans
    if isinstance(arr, _vtk.vtkBitArray):
        arr = vtk_bit_array_to_char(arr)
    # Handle string arrays
    if isinstance(arr, _vtk.vtkStringArray):
        return convert_string_array(arr)
    # Convert from vtkDataArray to NumPy
    return _vtk.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
    """Return True if the Object is a PyVista wrapped dataset."""
    wrapped_types = (pyvista.DataSet, pyvista.MultiBlock)
    return isinstance(obj, wrapped_types)
def point_array(mesh, name):
    """Return point array of a vtk object."""
    return convert_array(mesh.GetPointData().GetAbstractArray(name))
def field_array(mesh, name):
    """Return field array of a vtk object."""
    return convert_array(mesh.GetFieldData().GetAbstractArray(name))
def cell_array(mesh, name):
    """Return cell array of a vtk object."""
    return convert_array(mesh.GetCellData().GetAbstractArray(name))
def row_array(data_object, name):
    """Return row array of a vtk object."""
    return convert_array(data_object.GetRowData().GetAbstractArray(name))
def parse_field_choice(field):
    """Return the id of the given field."""
    if isinstance(field, FieldAssociation):
        return field
    if not isinstance(field, str):
        raise ValueError(f'Data field ({field}) not supported.')
    # Accept several spellings for each association.
    aliases = {
        'cell': FieldAssociation.CELL,
        'c': FieldAssociation.CELL,
        'cells': FieldAssociation.CELL,
        'point': FieldAssociation.POINT,
        'p': FieldAssociation.POINT,
        'points': FieldAssociation.POINT,
        'field': FieldAssociation.NONE,
        'f': FieldAssociation.NONE,
        'fields': FieldAssociation.NONE,
        'row': FieldAssociation.ROW,
        'r': FieldAssociation.ROW,
    }
    field = field.strip().lower()
    if field not in aliases:
        raise ValueError(f'Data field ({field}) not supported.')
    return aliases[field]
def get_array(mesh, name, preference='cell', info=False, err=False):
    """Search point, cell and field data for an array.

    Parameters
    ----------
    mesh : vtkDataSet or vtkTable
        Dataset (or table) to search for the named array.
    name : str
        The name of the array to get the range.
    preference : str, optional
        When scalars is specified, this is the preferred array type to
        search for in the dataset. Must be either ``'point'``,
        ``'cell'``, or ``'field'``
    info : bool
        Return info about the array rather than the array itself.
    err : bool
        Boolean to control whether to throw an error if array is not present.

    Returns
    -------
    ndarray, None, or tuple
        The array (``None`` if absent and ``err`` is False); when
        ``info=True``, a ``(array, FieldAssociation)`` pair instead.
    """
    # Tables only carry row data; handle them up front.
    if isinstance(mesh, _vtk.vtkTable):
        arr = row_array(mesh, name)
        if arr is None and err:
            raise KeyError(f'Data array ({name}) not present in this dataset.')
        field = FieldAssociation.ROW
        if info:
            return arr, field
        return arr

    parr = point_array(mesh, name)
    carr = cell_array(mesh, name)
    farr = field_array(mesh, name)
    preference = parse_field_choice(preference)
    # Name found under more than one association: let preference decide.
    if np.sum([parr is not None, carr is not None, farr is not None]) > 1:
        if preference == FieldAssociation.CELL:
            if info:
                return carr, FieldAssociation.CELL
            else:
                return carr
        elif preference == FieldAssociation.POINT:
            if info:
                return parr, FieldAssociation.POINT
            else:
                return parr
        elif preference == FieldAssociation.NONE:
            if info:
                return farr, FieldAssociation.NONE
            else:
                return farr
        else:
            # ROW preference is meaningless on a non-table dataset
            raise ValueError(f'Data field ({preference}) not supported.')

    # Unambiguous (or missing) name: take whichever association has it.
    arr = None
    field = None
    if parr is not None:
        arr = parr
        field = FieldAssociation.POINT
    elif carr is not None:
        arr = carr
        field = FieldAssociation.CELL
    elif farr is not None:
        arr = farr
        field = FieldAssociation.NONE
    elif err:
        raise KeyError(f'Data array ({name}) not present in this dataset.')
    if info:
        return arr, field
    return arr
def vtk_points(points, deep=True):
    """Convert a numpy array or array-like to a vtkPoints object."""
    points = np.asanyarray(points)

    # only numeric data can back a vtkPoints buffer
    if not np.issubdtype(points.dtype, np.number):
        raise TypeError('Points must be a numeric type')

    # a flat array is interpreted as a run of xyz triples
    if points.ndim == 1:
        points = points.reshape(-1, 3)
    elif points.ndim > 2:
        raise ValueError(f'Dimension of ``points`` should be 1 or 2, not {points.ndim}')
    if points.shape[1] != 3:
        raise ValueError(
            f'Points array must contain three values per point. Shape is {points.shape} and should be (X, 3)'
        )

    # VTK requires a C-contiguous buffer
    points = np.require(points, requirements=['C'])
    vtk_points_obj = _vtk.vtkPoints()
    vtk_points_obj.SetData(_vtk.numpy_to_vtk(points, deep=deep))
    return vtk_points_obj
def line_segments_from_points(points):
    """Generate non-connected line segments from points.

    Assumes points are ordered as line segments and an even number of
    points.

    Parameters
    ----------
    points : np.ndarray
        Points representing line segments. An even number must be
        given as every two vertices represent a single line
        segment. For example, two line segments would be represented
        as:

        ``np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])``

    Returns
    -------
    lines : pyvista.PolyData
        PolyData with lines and cells.

    Examples
    --------
    This example plots two line segments at right angles to each other.

    >>> import pyvista
    >>> import numpy as np
    >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
    >>> cpos = lines.plot()
    """
    if len(points) % 2 != 0:
        raise ValueError("An even number of points must be given to define each segment.")
    # Assuming ordered points, create array defining line order
    n_points = len(points)
    n_lines = n_points // 2
    # each VTK line cell is [2, start_index, end_index]
    lines = np.c_[(2 * np.ones(n_lines, np.int_),
                   np.arange(0, n_points-1, step=2),
                   np.arange(1, n_points+1, step=2))]
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = lines
    return poly
def lines_from_points(points, close=False):
    """Make a connected line set given an array of points.

    Parameters
    ----------
    points : np.ndarray
        Points representing the vertices of the connected segments. For
        example, two line segments would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])

    close : bool, optional
        If True, close the line segments into a loop

    Returns
    -------
    lines : pyvista.PolyData
        PolyData with lines and cells.
    """
    n_points = len(points)
    # each VTK line cell is [2, i, i + 1]
    connectivity = np.full((n_points - 1, 3), 2, dtype=np.int_)
    connectivity[:, 1] = np.arange(0, n_points - 1, dtype=np.int_)
    connectivity[:, 2] = np.arange(1, n_points, dtype=np.int_)
    if close:
        # join the last vertex back to the first
        connectivity = np.append(connectivity, [[2, n_points - 1, 0]], axis=0)
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = connectivity
    return poly
def make_tri_mesh(points, faces):
    """Construct a ``pyvista.PolyData`` mesh using points and faces arrays.

    Construct a mesh from an Nx3 array of points and an Mx3 array of
    triangle indices, resulting in a mesh with N vertices and M
    triangles. This function does not require the standard VTK
    "padding" column and simplifies mesh creation.

    Parameters
    ----------
    points : np.ndarray
        Array of points with shape (N, 3) storing the vertices of the
        triangle mesh.

    faces : np.ndarray
        Array of indices with shape ``(M, 3)`` containing the triangle
        indices.

    Returns
    -------
    tri_mesh : pyvista.PolyData
        PolyData instance containing the triangle mesh.

    Examples
    --------
    This example discretizes the unit square into a triangle mesh with
    nine vertices and eight faces.

    >>> import numpy as np
    >>> import pyvista as pv
    >>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
    ...                    [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
    ...                    [1, 1, 0]])
    >>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
    ...                   [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
    >>> tri_mesh = pv.make_tri_mesh(points, faces)
    >>> cpos = tri_mesh.plot(show_edges=True)
    """
    if points.shape[1] != 3:
        raise ValueError("Points array should have shape (N, 3).")
    if faces.ndim != 2 or faces.shape[1] != 3:
        raise ValueError("Face array should have shape (M, 3).")
    # prepend the VTK cell-size column (3 vertices per triangle)
    cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
    cells[:, 0] = 3
    cells[:, 1:] = faces
    return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
    """Create a vtkPolyData object composed of vectors.

    Parameters
    ----------
    orig : array-like
        (n, 3) vector origins; flat input is reshaped to (-1, 3).
    vec : array-like
        (n, 3) vector components; flat input is reshaped to (-1, 3).

    Returns
    -------
    pyvista.PolyData
        Point cloud with active 'vectors' and 'mag' (magnitude) arrays.
    """
    # shape, dimension checking
    if not isinstance(orig, np.ndarray):
        orig = np.asarray(orig)
    if not isinstance(vec, np.ndarray):
        vec = np.asarray(vec)
    if orig.ndim != 2:
        orig = orig.reshape((-1, 3))
    elif orig.shape[1] != 3:
        raise ValueError('orig array must be 3D')
    if vec.ndim != 2:
        vec = vec.reshape((-1, 3))
    elif vec.shape[1] != 3:
        raise ValueError('vec array must be 3D')
    # Create vtk points and cells objects
    vpts = _vtk.vtkPoints()
    vpts.SetData(_vtk.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
    npts = orig.shape[0]
    # one single-point vertex cell per origin: [1, point_index]
    cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
    cells[:, 0] = 1
    cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
    vcells = pyvista.utilities.cells.CellArray(cells, npts)
    # Create vtkPolyData object
    pdata = _vtk.vtkPolyData()
    pdata.SetPoints(vpts)
    pdata.SetVerts(vcells)
    # Add vectors to polydata
    name = 'vectors'
    vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveVectors(name)
    # Add magnitude of vectors to polydata
    name = 'mag'
    scalars = (vec * vec).sum(1)**0.5
    vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveScalars(name)
    return pyvista.PolyData(pdata)
def trans_from_matrix(matrix):  # pragma: no cover
    """Convert a vtk matrix to a numpy.ndarray.

    DEPRECATED: Please use ``array_from_vtkmatrix``.
    """
    # import needs to happen here to prevent a circular import
    from pyvista.core.errors import DeprecationError
    # unconditionally raises: this stub exists only to direct callers
    # to the replacement API
    raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
    """Convert a vtk matrix to a ``numpy.ndarray``.

    Parameters
    ----------
    matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
        The vtk matrix to be converted to a ``numpy.ndarray``.
        Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
    """
    if isinstance(matrix, _vtk.vtkMatrix3x3):
        side = 3
    elif isinstance(matrix, _vtk.vtkMatrix4x4):
        side = 4
    else:
        raise TypeError('Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
                        f' got {type(matrix).__name__} instead.')
    # vtk matrices expose no buffer interface; pull elements one at a time
    return np.array(
        [[matrix.GetElement(i, j) for j in range(side)] for i in range(side)]
    )
def vtkmatrix_from_array(array):
    """Convert a ``numpy.ndarray`` or array-like to a vtk matrix.

    Parameters
    ----------
    array : numpy.ndarray or array-like
        The array or array-like to be converted to a vtk matrix.
        Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
        gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
    """
    array = np.asarray(array)
    if array.shape == (3, 3):
        vtk_matrix = _vtk.vtkMatrix3x3()
    elif array.shape == (4, 4):
        vtk_matrix = _vtk.vtkMatrix4x4()
    else:
        raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
    # vtk matrices expose no buffer interface; set elements one at a time
    for i, row in enumerate(array):
        for j, value in enumerate(row):
            vtk_matrix.SetElement(i, j, value)
    return vtk_matrix
def is_meshio_mesh(mesh):
    """Test if passed object is instance of ``meshio.Mesh``."""
    # meshio is an optional dependency; absence simply means "no"
    try:
        import meshio
    except ImportError:
        return False
    return isinstance(mesh, meshio.Mesh)
def wrap(dataset):
    """Wrap any given VTK data object to its appropriate pyvista data object.

    Other formats that are supported include:

    * 2D :class:`numpy.ndarray` of XYZ vertices
    * 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
    * 3D :class:`trimesh.Trimesh` mesh.
    * 3D :class:`meshio` mesh.

    Parameters
    ----------
    dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
        Dataset to wrap.

    Returns
    -------
    wrapped_dataset : pyvista class
        The `pyvista` wrapped dataset.

    Examples
    --------
    Wrap a numpy array representing a random point cloud.

    >>> import numpy as np
    >>> import pyvista
    >>> points = np.random.random((10, 3))
    >>> cloud = pyvista.wrap(points)
    >>> cloud  # doctest:+SKIP
    PolyData (0x7fc52db83d70)
      N Cells:  10
      N Points: 10
      X Bounds: 1.123e-01, 7.457e-01
      Y Bounds: 1.009e-01, 9.877e-01
      Z Bounds: 2.346e-03, 9.640e-01
      N Arrays: 0

    Wrap a Trimesh object.

    >>> import trimesh
    >>> import pyvista
    >>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
    >>> faces = [[0, 1, 2]]
    >>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
    >>> mesh = pyvista.wrap(tmesh)
    >>> mesh  # doctest:+SKIP
    PolyData (0x7fc55ff27ad0)
      N Cells:  1
      N Points: 3
      X Bounds: 0.000e+00, 0.000e+00
      Y Bounds: 0.000e+00, 1.000e+00
      Z Bounds: 0.000e+00, 1.000e+00
      N Arrays: 0

    Wrap a VTK object.

    >>> import pyvista
    >>> import vtk
    >>> points = vtk.vtkPoints()
    >>> p = [1.0, 2.0, 3.0]
    >>> vertices = vtk.vtkCellArray()
    >>> pid = points.InsertNextPoint(p)
    >>> _ = vertices.InsertNextCell(1)
    >>> _ = vertices.InsertCellPoint(pid)
    >>> point = vtk.vtkPolyData()
    >>> _ = point.SetPoints(points)
    >>> _ = point.SetVerts(vertices)
    >>> mesh = pyvista.wrap(point)
    >>> mesh  # doctest:+SKIP
    PolyData (0x7fc55ff27ad0)
      N Cells:  1
      N Points: 3
      X Bounds: 0.000e+00, 0.000e+00
      Y Bounds: 0.000e+00, 1.000e+00
      Z Bounds: 0.000e+00, 1.000e+00
      N Arrays: 0
    """
    # Return if None
    if dataset is None:
        return
    # Check if dataset is a numpy array.  We do this first since
    # pyvista_ndarray contains a VTK type that we don't want to
    # directly wrap.
    if isinstance(dataset, (np.ndarray, pyvista.pyvista_ndarray)):
        # a bare 3-vector is a single point
        if dataset.ndim == 1 and dataset.shape[0] == 3:
            return pyvista.PolyData(dataset)
        # (n, 3) arrays become point clouds; 3-D arrays become volumes
        if dataset.ndim > 1 and dataset.ndim < 3 and dataset.shape[1] == 3:
            return pyvista.PolyData(dataset)
        elif dataset.ndim == 3:
            mesh = pyvista.UniformGrid(dataset.shape)
            mesh['values'] = dataset.ravel(order='F')
            mesh.active_scalars_name = 'values'
            return mesh
        else:
            raise NotImplementedError('NumPy array could not be wrapped pyvista.')
    # dispatch table keyed on the VTK class name
    wrappers = {
        'vtkExplicitStructuredGrid': pyvista.ExplicitStructuredGrid,
        'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
        'vtkRectilinearGrid': pyvista.RectilinearGrid,
        'vtkStructuredGrid': pyvista.StructuredGrid,
        'vtkPolyData': pyvista.PolyData,
        'vtkImageData': pyvista.UniformGrid,
        'vtkStructuredPoints': pyvista.UniformGrid,
        'vtkMultiBlockDataSet': pyvista.MultiBlock,
        'vtkTable': pyvista.Table,
        # 'vtkParametricSpline': pyvista.Spline,
    }
    # Check if a dataset is a VTK type
    if hasattr(dataset, 'GetClassName'):
        key = dataset.GetClassName()
        try:
            return wrappers[key](dataset)
        except KeyError:
            logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
        return
    # wrap meshio
    if is_meshio_mesh(dataset):
        return from_meshio(dataset)
    # wrap trimesh
    if dataset.__class__.__name__ == 'Trimesh':
        # trimesh doesn't pad faces
        n_face = dataset.faces.shape[0]
        faces = np.empty((n_face, 4), dataset.faces.dtype)
        faces[:, 1:] = dataset.faces
        faces[:, 0] = 3
        return pyvista.PolyData(np.asarray(dataset.vertices), faces)
    # otherwise, flag tell the user we can't wrap this object
    raise NotImplementedError(f'Unable to wrap ({type(dataset)}) into a pyvista type.')
def image_to_texture(image):
    """Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``."""
    # pyvista.Texture's constructor accepts image data directly.
    return pyvista.Texture(image)
def numpy_to_texture(image):
    """Convert a NumPy image array to a vtk.vtkTexture."""
    # pyvista.Texture's constructor accepts a NumPy array directly.
    return pyvista.Texture(image)
def is_inside_bounds(point, bounds):
    """Check if a point is inside a set of bounds.

    This is implemented through recursion so that this is N-dimensional.
    """
    # Promote a scalar to a one-dimensional point.
    if isinstance(point, (int, float)):
        point = [point]
    # First (non-recursive) call: validate dimensionality, then convert both
    # arguments to deques so the recursion can consume one coordinate and one
    # (lower, upper) pair per step.  deque is excluded explicitly because it
    # marks an already-converted, in-flight recursive call.
    if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
        if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
            raise ValueError('Bounds mismatch point dimensionality')
        return is_inside_bounds(collections.deque(point), collections.deque(bounds))
    if not isinstance(point, collections.deque):
        raise TypeError(f'Unknown input data type ({type(point)}).')
    # All coordinates consumed without a violation: the point is inside.
    if not point:
        return True
    coord = point.popleft()
    low = bounds.popleft()
    high = bounds.popleft()
    # Short-circuit: stop recursing as soon as one coordinate is out of range.
    return low <= coord <= high and is_inside_bounds(point, bounds)
def fit_plane_to_points(points, return_meta=False):
    """Fit a plane to a set of points.

    Parameters
    ----------
    points : np.ndarray
        Size n by 3 array of points to fit a plane through.

    return_meta : bool
        If true, also returns the center and normal used to generate the plane.
    """
    pts = np.array(points)
    centroid = pts.mean(axis=0)
    # SVD of the centered cloud: the first two right-singular vectors span the
    # best-fit plane, so their cross product gives the plane normal.
    _, _, vh = np.linalg.svd(pts - centroid)
    plane_normal = np.cross(vh[0], vh[1])
    plane = pyvista.Plane(center=centroid, direction=plane_normal)
    if not return_meta:
        return plane
    return plane, centroid, plane_normal
def raise_not_matching(scalars, mesh):
    """Raise exception about inconsistencies.

    Raises
    ------
    ValueError
        Message is tailored to tables (rows) versus datasets (points/cells).
    """
    if isinstance(mesh, _vtk.vtkTable):
        raise ValueError(f'Number of scalars ({scalars.size}) must match number of rows ({mesh.n_rows}).')
    message = (f'Number of scalars ({scalars.size}) '
               f'must match either the number of points ({mesh.n_points}) '
               f'or the number of cells ({mesh.n_cells}).')
    raise ValueError(message)
def generate_plane(normal, origin):
    """Return a _vtk.vtkPlane with the given normal and origin."""
    vtk_plane = _vtk.vtkPlane()
    # VTK expects a unit-length normal, so normalize before setting it.
    unit_normal = normal / np.linalg.norm(normal)
    vtk_plane.SetNormal(unit_normal)
    vtk_plane.SetOrigin(origin)
    return vtk_plane
def try_callback(func, *args):
    """Wrap a given callback in a try statement."""
    try:
        func(*args)
    except Exception:
        etype, value, tb = sys.exc_info()
        # Drop the first frame (this wrapper) so the trace points at the callback.
        frames = traceback.extract_tb(tb)[1:]
        lines = traceback.format_list(frames)
        lines += traceback.format_exception_only(etype, value)
        logging.warning(
            'Encountered issue in callback (most recent call last):\n'
            + ''.join(lines).rstrip('\n'))
    return
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
    """Check if depth peeling is available.

    Attempts to use depth peeling to see if it is available for the
    current environment. Returns ``True`` if depth peeling is
    available and has been successfully leveraged, otherwise
    ``False``.
    """
    # Build a minimal scene: a single sphere.  Depth peeling only engages for
    # translucent geometry, so the actor's opacity must be below 1.
    sphere = _vtk.vtkSphereSource()
    sphere_mapper = _vtk.vtkPolyDataMapper()
    sphere_mapper.SetInputConnection(sphere.GetOutputPort())
    sphere_actor = _vtk.vtkActor()
    sphere_actor.SetMapper(sphere_mapper)
    sphere_actor.GetProperty().SetOpacity(0.5)
    # Render off-screen with alpha bit planes on and multisampling off, as
    # required for depth peeling.
    ren = _vtk.vtkRenderer()
    ren_win = _vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetOffScreenRendering(True)
    ren_win.SetAlphaBitPlanes(True)
    ren_win.SetMultiSamples(0)
    ren.AddActor(sphere_actor)
    ren.SetUseDepthPeeling(True)
    ren.SetMaximumNumberOfPeels(number_of_peels)
    ren.SetOcclusionRatio(occlusion_ratio)
    ren_win.Render()
    # Ask VTK whether the last render actually used depth peeling.
    return ren.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
    """Call a function using a thread.

    Decorator that runs ``fn`` asynchronously in a daemonless
    ``threading.Thread``.  The wrapper returns the started ``Thread`` so
    callers can ``join()`` it if they need the result of the side effects.
    """
    from functools import wraps

    # functools.wraps preserves the decorated function's name, docstring and
    # module, which the original implementation dropped (breaking
    # introspection, help() and pickling of decorated callables).
    @wraps(fn)
    def wrapper(*args, **kwargs):
        thread = Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
class conditional_decorator:
    """Conditional decorator for methods.

    Applies ``dec`` to the decorated function only when ``condition`` is
    truthy; otherwise the function is passed through untouched.
    """

    def __init__(self, dec, condition):
        """Initialize."""
        self.decorator = dec
        self.condition = condition

    def __call__(self, func):
        """Call the decorated function if condition is matched."""
        # Pass-through when the condition is not met.
        return self.decorator(func) if self.condition else func
class ProgressMonitor():
    """A standard class for monitoring the progress of a VTK algorithm.

    This must be used in a ``with`` context and it will block keyboard
    interrupts from happening until the exit event as interrupts will crash
    the kernel if the VTK algorithm is still executing.
    """
    def __init__(self, algorithm, message="", scaling=100):
        """Initialize observer."""
        # tqdm is an optional dependency; fail early with a clear message.
        try:
            from tqdm import tqdm
        except ImportError:
            raise ImportError("Please install `tqdm` to monitor algorithms.")
        self.event_type = _vtk.vtkCommand.ProgressEvent
        self.progress = 0.0
        self._last_progress = self.progress
        self.algorithm = algorithm
        self.message = message
        # Set to the (sig, frame) pair when SIGINT arrives mid-run; checked
        # on every progress event so the algorithm can be aborted cleanly.
        self._interrupt_signal_received = False
        self._old_progress = 0
        self._old_handler = None
        self._progress_bar = None
    def handler(self, sig, frame):
        """Pass signal to custom interrupt handler."""
        self._interrupt_signal_received = (sig, frame)
        logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
                      'VTK algorithm finishes.')
    def __call__(self, obj, event, *args):
        """Call progress update callback.

        On an event occurrence, this function executes.
        """
        if self._interrupt_signal_received:
            # Ask the VTK algorithm to stop instead of raising mid-execution.
            obj.AbortExecuteOn()
        else:
            # Advance the bar by the delta since the last reported progress.
            progress = obj.GetProgress()
            step = progress - self._old_progress
            self._progress_bar.update(step)
            self._old_progress = progress
    def __enter__(self):
        """Enter event for ``with`` context."""
        from tqdm import tqdm
        # check if in main thread; signal handlers can only be installed there
        if threading.current_thread().__class__.__name__ == '_MainThread':
            self._old_handler = signal.signal(signal.SIGINT, self.handler)
        self._progress_bar = tqdm(total=1, leave=True,
                                  bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
        self._progress_bar.set_description(self.message)
        self.algorithm.AddObserver(self.event_type, self)
        return self._progress_bar
    def __exit__(self, type, value, traceback):
        """Exit event for ``with`` context."""
        self._progress_bar.total = 1
        self._progress_bar.refresh()
        self._progress_bar.close()
        self.algorithm.RemoveObservers(self.event_type)
        # Restore the original SIGINT handler if we replaced it on entry.
        if threading.current_thread().__class__.__name__ == '_MainThread':
            signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
    """Decorate a class, overriding __new__.

    Prevents the decorated class itself from being instantiated, similar to
    ``abc.ABCMeta`` but without requiring any abstract method.  Subclasses
    remain instantiable.
    """
    def __new__(cls, *args, **kwargs):
        # Only the decorated class itself is blocked; subclasses fall through.
        if cls is not cls_:
            return object.__new__(cls)
        raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
    cls_.__new__ = __new__
    return cls_
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
    """Rotate points angle (in deg) about an axis.

    Parameters
    ----------
    points : numpy.ndarray
        Array of points with shape ``(N, 3)``.

    angle : float
        Rotation angle.

    inplace : bool, optional
        Updates points in-place while returning nothing.

    deg : bool, optional
        If `True`, the angle is interpreted as degrees instead of
        radians. Default is `True`.

    axis : str, optional
        Name of axis to rotate about. Valid options are ``'x'``, ``'y'``,
        and ``'z'``. Default value is ``'z'``.

    Returns
    -------
    points : numpy.ndarray
        Rotated points.
    """
    # Map the axis name onto its unit direction vector.
    unit_vectors = {
        'x': (1, 0, 0),
        'y': (0, 1, 0),
        'z': (0, 0, 1),
    }
    key = axis.lower()
    if key not in unit_vectors:
        raise ValueError('Invalid axis. Must be either "x", "y", or "z"')
    # Build the rotation matrix, then delegate the (possibly in-place)
    # application to the shared transformation helpers.
    rot_mat = transformations.axis_angle_rotation(unit_vectors[key], angle, deg=deg)
    return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)
def cubemap(path='', prefix='', ext='.jpg'):
    """Construct a cubemap from 6 images.

    Each of the 6 images must be named ``<prefix><face><ext>`` where
    ``<face>`` is one of ``posx``, ``negx``, ``posy``, ``negy``, ``posz``,
    ``negz``.  Prefix may be empty, and extension defaults to ``'.jpg'``.
    For example, ``'skybox2-negx.jpg'`` ... ``'skybox2-posz.jpg'`` with the
    prefix ``'skybox2-'``.

    Parameters
    ----------
    prefix : str, optional
        Prefix to the filename.

    ext : str, optional
        The filename extension. For example ``'.jpg'``.

    path : str, optional
        Directory containing the cubemap images.

    Returns
    -------
    pyvista.Texture
        Texture with cubemap.

    Examples
    --------
    >>> import pyvista
    >>> skybox = pyvista.cubemap('my_directory', 'skybox', '.jpeg') # doctest:+SKIP
    """
    faces = ('posx', 'negx', 'posy', 'negy', 'posz', 'negz')
    file_names = [os.path.join(path, f'{prefix}{face}{ext}') for face in faces]
    # Validate every expected face file before touching VTK.
    for file_name in file_names:
        if not os.path.isfile(file_name):
            expected = '\n'.join(file_names)
            raise FileNotFoundError(f'Unable to locate {file_name}\n'
                                    'Expected to find the following files:\n'
                                    f'{expected}')
    texture = pyvista.Texture()
    texture.cube_map = True  # Must be set prior to setting images
    # Load each face, flip it vertically (VTK image origin), and attach it.
    for face_index, file_name in enumerate(file_names):
        image = pyvista.read(file_name)
        flipper = _vtk.vtkImageFlip()
        flipper.SetInputDataObject(image)
        flipper.SetFilteredAxis(1)  # flip y axis
        flipper.Update()
        texture.SetInputDataObject(face_index, flipper.GetOutput())
    return texture
|
|
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import urllib
import gdata
import atom.service
import gdata.service
import gdata.apps
import atom
# Provisioning API version and HTTP success status.
API_VER="2.0"
HTTP_OK=200
# Error codes raised by the Provisioning API; AppsForYourDomainException
# parses these out of the error response body.
UNKOWN_ERROR=1000
USER_DELETED_RECENTLY=1100
USER_SUSPENDED=1101
DOMAIN_USER_LIMIT_EXCEEDED=1200
DOMAIN_ALIAS_LIMIT_EXCEEDED=1201
DOMAIN_SUSPENDED=1202
DOMAIN_FEATURE_UNAVAILABLE=1203
ENTITY_EXISTS=1300
ENTITY_DOES_NOT_EXIST=1301
ENTITY_NAME_IS_RESERVED=1302
ENTITY_NAME_NOT_VALID=1303
INVALID_GIVEN_NAME=1400
INVALID_FAMILY_NAME=1401
INVALID_PASSWORD=1402
INVALID_USERNAME=1403
INVALID_HASH_FUNCTION_NAME=1404
INVALID_HASH_DIGGEST_LENGTH=1405
INVALID_EMAIL_ADDRESS=1406
INVALID_QUERY_PARAMETER_VALUE=1407
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500
# Default mailbox quota in megabytes.
DEFAULT_QUOTA_LIMIT='2048'
class Error(Exception):
  """Base exception for the Google Apps provisioning client."""
  pass
class AppsForYourDomainException(Error):
  """Error raised from a Provisioning API error response.

  Parses the XML error body into ``error_code``, ``reason`` and
  ``invalidInput``.  If the body cannot be parsed, ``error_code`` falls
  back to ``UNKOWN_ERROR``.
  """

  def __init__(self, response):
    Error.__init__(self, response)
    # Initialize defaults so the attributes always exist, even when the
    # response body is missing or only partially parseable (the original
    # code left reason/invalidInput undefined in that case).
    self.element_tree = None
    self.reason = None
    self.invalidInput = None
    self.error_code = UNKOWN_ERROR
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # not swallowed; any parse problem still yields UNKOWN_ERROR.
    try:
      self.element_tree = ElementTree.fromstring(response['body'])
      self.error_code = int(self.element_tree[0].attrib['errorCode'])
      self.reason = self.element_tree[0].attrib['reason']
      self.invalidInput = self.element_tree[0].attrib['invalidInput']
    except Exception:
      self.error_code = UNKOWN_ERROR
class AppsService(gdata.service.GDataService):
  """Client for the Google Apps Provisioning service."""
  def __init__(self, email=None, password=None, domain=None, source=None,
               server='apps-apis.google.com', additional_headers=None,
               **kwargs):
    """Creates a client for the Google Apps Provisioning service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      domain: string (optional) The Google Apps domain name.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'apps-apis.google.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='apps', source=source,
        server=server, additional_headers=additional_headers, **kwargs)
    # The Provisioning API is served over HTTPS only.
    self.ssl = True
    self.port = 443
    self.domain = domain
  def _baseURL(self):
    # All feeds for this domain live under /a/feeds/<domain>.
    return "/a/feeds/%s" % self.domain
  def GetGeneratorFromLinkFinder(self, link_finder, func):
    """returns a generator for pagination"""
    # Yield the first page, then follow the atom "next" links lazily,
    # converting each raw response with the supplied parser ``func``.
    yield link_finder
    next = link_finder.GetNextLink()
    while next is not None:
      next_feed = func(str(self.Get(next.href)))
      yield next_feed
      next = next_feed.GetNextLink()
  def AddAllElementsFromAllPages(self, link_finder, func):
    """retrieve all pages and add all elements"""
    # Eagerly walk every "next" link and merge all entries into the first
    # page's feed, which is returned as the combined result.
    next = link_finder.GetNextLink()
    while next is not None:
      next_feed = self.Get(next.href, converter=func)
      for a_entry in next_feed.entry:
        link_finder.entry.append(a_entry)
      next = next_feed.GetNextLink()
    return link_finder
  def RetrievePageOfEmailLists(self, start_email_list_name=None):
    """Retrieve one page of email list"""
    uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
    if start_email_list_name is not None:
      uri += "?startEmailListName=%s" % start_email_list_name
    try:
      return gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveAllEmailLists(self):
    """Retrieve all email list of a domain."""
    ret = self.RetrievePageOfEmailLists()
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.EmailListFeedFromString)
  def RetrieveEmailList(self, list_name):
    """Retreive a single email list by the list's name."""
    uri = "%s/emailList/%s/%s" % (
      self._baseURL(), API_VER, list_name)
    try:
      return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveEmailLists(self, recipient):
    """Retrieve All Email List Subscriptions for an Email Address."""
    uri = "%s/emailList/%s?recipient=%s" % (
      self._baseURL(), API_VER, recipient)
    try:
      ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.EmailListFeedFromString)
  def RemoveRecipientFromEmailList(self, recipient, list_name):
    """Remove recipient from email list."""
    uri = "%s/emailList/%s/%s/recipient/%s" % (
      self._baseURL(), API_VER, list_name, recipient)
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrievePageOfRecipients(self, list_name, start_recipient=None):
    """Retrieve one page of recipient of an email list. """
    uri = "%s/emailList/%s/%s/recipient" % (
      self._baseURL(), API_VER, list_name)
    if start_recipient is not None:
      uri += "?startRecipient=%s" % start_recipient
    try:
      return gdata.apps.EmailListRecipientFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveAllRecipients(self, list_name):
    """Retrieve all recipient of an email list."""
    ret = self.RetrievePageOfRecipients(list_name)
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.EmailListRecipientFeedFromString)
  def AddRecipientToEmailList(self, recipient, list_name):
    """Add a recipient to a email list."""
    uri = "%s/emailList/%s/%s/recipient" % (
      self._baseURL(), API_VER, list_name)
    recipient_entry = gdata.apps.EmailListRecipientEntry()
    recipient_entry.who = gdata.apps.Who(email=recipient)
    try:
      return gdata.apps.EmailListRecipientEntryFromString(
        str(self.Post(recipient_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def DeleteEmailList(self, list_name):
    """Delete a email list"""
    uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def CreateEmailList(self, list_name):
    """Create a email list. """
    uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
    email_list_entry = gdata.apps.EmailListEntry()
    email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
    try:
      return gdata.apps.EmailListEntryFromString(
        str(self.Post(email_list_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def DeleteNickname(self, nickname):
    """Delete a nickname"""
    uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrievePageOfNicknames(self, start_nickname=None):
    """Retrieve one page of nicknames in the domain"""
    uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
    if start_nickname is not None:
      uri += "?startNickname=%s" % start_nickname
    try:
      return gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrieveAllNicknames(self):
    """Retrieve all nicknames in the domain"""
    ret = self.RetrievePageOfNicknames()
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.NicknameFeedFromString)
  def RetrieveNicknames(self, user_name):
    """Retrieve nicknames of the user"""
    uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
    try:
      ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.NicknameFeedFromString)
  def RetrieveNickname(self, nickname):
    """Retrieve a nickname.

    Args:
      nickname: string The nickname to retrieve

    Returns:
      gdata.apps.NicknameEntry
    """
    uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
    try:
      return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def CreateNickname(self, user_name, nickname):
    """Create a nickname"""
    uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
    nickname_entry = gdata.apps.NicknameEntry()
    nickname_entry.login = gdata.apps.Login(user_name=user_name)
    nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
    try:
      return gdata.apps.NicknameEntryFromString(
        str(self.Post(nickname_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def DeleteUser(self, user_name):
    """Delete a user account"""
    uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
    try:
      return self.Delete(uri)
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def UpdateUser(self, user_name, user_entry):
    """Update a user account."""
    uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
    try:
      return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def CreateUser(self, user_name, family_name, given_name, password,
                 suspended='false', quota_limit=None,
                 password_hash_function=None):
    """Create a user account. """
    uri = "%s/user/%s" % (self._baseURL(), API_VER)
    user_entry = gdata.apps.UserEntry()
    user_entry.login = gdata.apps.Login(
      user_name=user_name, password=password, suspended=suspended,
      hash_function_name=password_hash_function)
    user_entry.name = gdata.apps.Name(family_name=family_name,
                                      given_name=given_name)
    # Quota is optional; omit the element entirely when not specified.
    if quota_limit is not None:
      user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
    try:
      return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def SuspendUser(self, user_name):
    # Fetch-modify-update; skips the PUT when the user is already suspended.
    user_entry = self.RetrieveUser(user_name)
    if user_entry.login.suspended != 'true':
      user_entry.login.suspended = 'true'
      user_entry = self.UpdateUser(user_name, user_entry)
    return user_entry
  def RestoreUser(self, user_name):
    # Fetch-modify-update; skips the PUT when the user is already active.
    user_entry = self.RetrieveUser(user_name)
    if user_entry.login.suspended != 'false':
      user_entry.login.suspended = 'false'
      user_entry = self.UpdateUser(user_name, user_entry)
    return user_entry
  def RetrieveUser(self, user_name):
    """Retrieve an user account.

    Args:
      user_name: string The user name to retrieve

    Returns:
      gdata.apps.UserEntry
    """
    uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
    try:
      return gdata.apps.UserEntryFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def RetrievePageOfUsers(self, start_username=None):
    """Retrieve one page of users in this domain."""
    uri = "%s/user/%s" % (self._baseURL(), API_VER)
    if start_username is not None:
      uri += "?startUsername=%s" % start_username
    try:
      return gdata.apps.UserFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise AppsForYourDomainException(e.args[0])
  def GetGeneratorForAllUsers(self):
    """Retrieve a generator for all users in this domain."""
    first_page = self.RetrievePageOfUsers()
    return self.GetGeneratorFromLinkFinder(first_page,
                                           gdata.apps.UserFeedFromString)
  def RetrieveAllUsers(self):
    """Retrieve all users in this domain. OBSOLETE"""
    ret = self.RetrievePageOfUsers()
    # pagination
    return self.AddAllElementsFromAllPages(
      ret, gdata.apps.UserFeedFromString)
class PropertyService(gdata.service.GDataService):
  """Client for the Google Apps Property service."""
  def __init__(self, email=None, password=None, domain=None, source=None,
               server='apps-apis.google.com', additional_headers=None):
    gdata.service.GDataService.__init__(self, email=email, password=password,
                                        service='apps', source=source,
                                        server=server,
                                        additional_headers=additional_headers)
    # The Property service is served over HTTPS only.
    self.ssl = True
    self.port = 443
    self.domain = domain
  def AddAllElementsFromAllPages(self, link_finder, func):
    """retrieve all pages and add all elements"""
    # Follow atom "next" links and merge all entries into the first page.
    next = link_finder.GetNextLink()
    while next is not None:
      next_feed = self.Get(next.href, converter=func)
      for a_entry in next_feed.entry:
        link_finder.entry.append(a_entry)
      next = next_feed.GetNextLink()
    return link_finder
  def _GetPropertyEntry(self, properties):
    # Build a PropertyEntry from a dict, skipping None names/values.
    property_entry = gdata.apps.PropertyEntry()
    property = []
    for name, value in properties.iteritems():
      if name is not None and value is not None:
        property.append(gdata.apps.Property(name=name, value=value))
    property_entry.property = property
    return property_entry
  def _PropertyEntry2Dict(self, property_entry):
    # Inverse of _GetPropertyEntry: flatten an entry back into a dict.
    properties = {}
    for i, property in enumerate(property_entry.property):
      properties[property.name] = property.value
    return properties
  def _GetPropertyFeed(self, uri):
    try:
      return gdata.apps.PropertyFeedFromString(str(self.Get(uri)))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _GetPropertiesList(self, uri):
    property_feed = self._GetPropertyFeed(uri)
    # pagination
    property_feed = self.AddAllElementsFromAllPages(
      property_feed, gdata.apps.PropertyFeedFromString)
    properties_list = []
    for property_entry in property_feed.entry:
      properties_list.append(self._PropertyEntry2Dict(property_entry))
    return properties_list
  def _GetProperties(self, uri):
    # GET a single property entry and return it as a dict.
    try:
      return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
        str(self.Get(uri))))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _PostProperties(self, uri, properties):
    # POST (create) a property entry built from the given dict.
    property_entry = self._GetPropertyEntry(properties)
    try:
      return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
        str(self.Post(property_entry, uri))))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _PutProperties(self, uri, properties):
    # PUT (update) a property entry built from the given dict.
    property_entry = self._GetPropertyEntry(properties)
    try:
      return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString(
        str(self.Put(property_entry, uri))))
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
  def _DeleteProperties(self, uri):
    # DELETE the property entry at the given URI.
    try:
      self.Delete(uri)
    except gdata.service.RequestError, e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
|
|
# Copyright 2015 Software Freedom Conservancy
# Copyright 2007-2009 WebDriver committers
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import UnexpectedAlertPresentException
import unittest
class AlertsTest(unittest.TestCase):
    """End-to-end tests for JavaScript alert/confirm/prompt handling.

    NOTE(review): assumes ``self.driver`` (a WebDriver) and
    ``self.webserver`` are injected by the surrounding test runner before
    these tests execute -- confirm against the suite's conftest/runner.
    """
    def testShouldBeAbleToOverrideTheWindowAlertMethod(self):
        self._loadPage("alerts")
        # Replace window.alert so the message lands in the DOM instead of a
        # native dialog.
        self.driver.execute_script(
            "window.alert = function(msg) { document.getElementById('text').innerHTML = msg; }")
        self.driver.find_element(by=By.ID, value="alert").click()
        try:
            self.assertEqual(self.driver.find_element_by_id('text').text, "cheese")
        except Exception as e:
            # if we're here, likely the alert is displayed
            # not dismissing it will affect other tests
            try:
                self._waitForAlert().dismiss()
            except Exception:
                pass
            raise e
    def testShouldAllowUsersToAcceptAnAlertManually(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)
    def testShouldAllowUsersToAcceptAnAlertWithNoTextManually(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID,"empty-alert").click();
        alert = self._waitForAlert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)
    def testShouldGetTextOfAlertOpenedInSetTimeout(self):
        self._loadPage("alerts")
        self.driver.find_element_by_id("slow-alert").click()
        # DO NOT WAIT OR SLEEP HERE
        # This is a regression test for a bug where only the first switchTo call would throw,
        # and only if it happens before the alert actually loads.
        alert = self._waitForAlert()
        try:
            self.assertEqual("Slow", alert.text)
        finally:
            alert.accept()
    @pytest.mark.ignore_chrome
    def testShouldAllowUsersToDismissAnAlertManually(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert()
        alert.dismiss()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)
    def testShouldAllowAUserToAcceptAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="prompt").click()
        alert = self._waitForAlert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)
    def testShouldAllowAUserToDismissAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="prompt").click()
        alert = self._waitForAlert()
        alert.dismiss()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)
    def testShouldAllowAUserToSetTheValueOfAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="prompt").click()
        alert = self._waitForAlert()
        alert.send_keys("cheese")
        alert.accept()
        result = self.driver.find_element(by=By.ID, value="text").text
        self.assertEqual("cheese", result)
    def testSettingTheValueOfAnAlertThrows(self):
        # send_keys is only valid on prompts; alerts must reject it.
        self._loadPage("alerts")
        self.driver.find_element(By.ID,"alert").click();
        alert = self._waitForAlert()
        try:
            alert.send_keys("cheese");
            self.fail("Expected exception");
        except ElementNotVisibleException:
            pass
        except InvalidElementStateException:
            pass
        finally:
            alert.accept()
    def testAlertShouldNotAllowAdditionalCommandsIfDimissed(self):
        self._loadPage("alerts");
        self.driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert()
        alert.dismiss()
        # Once dismissed, any further interaction with the handle must fail.
        try:
            alert.text
            self.fail("Expected NoAlertPresentException")
        except NoAlertPresentException:
            pass
    def testShouldAllowUsersToAcceptAnAlertInAFrame(self):
        self._loadPage("alerts")
        self.driver.switch_to.frame("iframeWithAlert")
        self.driver.find_element_by_id("alertInFrame").click()
        alert = self._waitForAlert()
        alert.accept()
        self.assertEqual("Testing Alerts", self.driver.title)
    def testShouldAllowUsersToAcceptAnAlertInANestedFrame(self):
        self._loadPage("alerts")
        self.driver.switch_to.frame("iframeWithIframe")
        self.driver.switch_to.frame("iframeWithAlert")
        self.driver.find_element_by_id("alertInFrame").click()
        alert = self._waitForAlert()
        alert.accept()
        self.assertEqual("Testing Alerts", self.driver.title)
    def testShouldThrowAnExceptionIfAnAlertHasNotBeenDealtWithAndDismissTheAlert(self):
        pass
        # //TODO(David) Complete this test
    def testPromptShouldUseDefaultValueIfNoKeysSent(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "prompt-with-default").click()
        alert = self._waitForAlert()
        alert.accept()
        txt = self.driver.find_element(By.ID, "text").text
        self.assertEqual("This is a default value", txt)
    def testPromptShouldHaveNullValueIfDismissed(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "prompt-with-default").click()
        alert = self._waitForAlert()
        alert.dismiss()
        # Dismissing a prompt yields JS null, rendered as the string "null".
        self.assertEqual("null", self.driver.find_element(By.ID, "text").text)
    def testHandlesTwoAlertsFromOneInteraction(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "double-prompt").click()
        alert1 = self._waitForAlert()
        alert1.send_keys("brie")
        alert1.accept()
        alert2 = self._waitForAlert()
        alert2.send_keys("cheddar")
        alert2.accept();
        self.assertEqual(self.driver.find_element(By.ID, "text1").text, "brie")
        self.assertEqual(self.driver.find_element(By.ID, "text2").text, "cheddar")
    def testShouldHandleAlertOnPageLoad(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "open-page-with-onload-alert").click()
        alert = self._waitForAlert()
        value = alert.text
        alert.accept()
        self.assertEquals("onload", value)
    def testShouldAllowTheUserToGetTheTextOfAnAlert(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert()
        value = alert.text
        alert.accept()
        self.assertEqual("cheese", value)
    def testUnexpectedAlertPresentExceptionContainsAlertText(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="alert").click()
        alert = self._waitForAlert()
        value = alert.text
        # Navigating away while an alert is open must raise, and the raised
        # exception must carry the alert's text.
        try:
            self._loadPage("simpleTest")
            raise Exception("UnexpectedAlertPresentException should have been thrown")
        except UnexpectedAlertPresentException as uape:
            self.assertEquals(value, uape.alert_text)
            self.assertTrue(str(uape).startswith("Alert Text: %s" % value))
    def _waitForAlert(self):
        # Poll up to 3 seconds for an alert to appear.
        return WebDriverWait(self.driver, 3).until(EC.alert_is_present())
    def _pageURL(self, name):
        return self.webserver.where_is(name + '.html')
    def _loadSimplePage(self):
        self._loadPage("simpleTest")
    def _loadPage(self, name):
        try:
            # just in case a previous test left open an alert
            # NOTE(review): ``switch_to.alert`` is a property in modern
            # selenium; calling it as ``alert()`` looks wrong -- the bare
            # except below hides any resulting TypeError. Confirm against the
            # selenium version this suite targets.
            self.driver.switch_to.alert().dismiss()
        except:
            pass
        self.driver.get(self._pageURL(name))
|
|
#!/usr/bin/env python
# fastqc 0.0.1
# Generated by dx-app-wizard.
#
# Parallelized execution pattern: Your app will generate multiple jobs
# to perform some computation in parallel, followed by a final
# "postprocess" stage that will perform any additional computations as
# necessary.
#
# See https://wiki.dnanexus.com/Developer-Portal for documentation and
# tutorials on how to modify this file.
#
# DNAnexus Python Bindings (dxpy) documentation:
# http://autodoc.dnanexus.com/bindings/python/current/
import os, subprocess, shlex, time
import dxpy
import requests
import json
import re
HEADERS = {'content-type': 'application/json'}
SERVER = 'https://www.encodeproject.org/'
S3_SERVER = 's3://encode-files/'

# Resolve data paths relative to the DNAnexus execution root when present.
root_dir = os.environ.get('DX_FS_ROOT') or ""
DATA = root_dir + "/opt/data/"

auth = {}
try:
    auth = json.load(open(DATA + "keys.json"))
except (IOError, ValueError):
    # IOError: keys.json missing; ValueError: keys.json is not valid JSON.
    print("Error loading AUTH keys. Please add JSON file with AUTHID and AUTHPW named 'keys.json' to (resources)/opt/data")
    # Bug fix: the original evaluated the builtin `exit` without calling it
    # (a no-op), so the app limped on and later failed with a KeyError on
    # auth['AUTHID']. Fail fast instead.
    exit(1)

# get all the file objects
encValData = DATA + 'encValData'

# validateFiles argument sets keyed by ENCODE file_format; None means the
# format is accepted without validation.
validate_map = {
    'bam': ['-type=bam'],
    'bed': ['-type=bed6+'],  # if this fails we will drop to bed3+
    'bedLogR': ['-type=bigBed9+1', '-as=%s/as/bedLogR.as' % encValData],
    'bed_bedLogR': ['-type=bed9+1', '-as=%s/as/bedLogR.as' % encValData],
    'bedMethyl': ['-type=bigBed9+2', '-as=%s/as/bedMethyl.as' % encValData],
    'bed_bedMethyl': ['-type=bed9+2', '-as=%s/as/bedMethyl.as' % encValData],
    'bigBed': ['-type=bigBed6+'],  # if this fails we will drop to bigBed3+
    'bigWig': ['-type=bigWig'],
    'broadPeak': ['-type=bigBed6+3', '-as=%s/as/broadPeak.as' % encValData],
    'bed_broadPeak': ['-type=bed6+3', '-as=%s/as/broadPeak.as' % encValData],
    'fasta': ['-type=fasta'],
    'fastq': ['-type=fastq'],
    'gtf': None,
    'idat': ['-type=idat'],
    'narrowPeak': ['-type=bigBed6+4', '-as=%s/as/narrowPeak.as' % encValData],
    'bed_narrowPeak': ['-type=bed6+4', '-as=%s/as/narrowPeak.as' % encValData],
    'rcc': ['-type=rcc'],
    'tar': None,
    'tsv': None,
    '2bit': None,
    'csfasta': ['-type=csfasta'],
    'csqual': ['-type=csqual'],
    'bedRnaElements': ['-type=bed6+3', '-as=%s/as/bedRnaElements.as' % encValData],
    'CEL': None,
}
@dxpy.entry_point("postprocess")
def postprocess(report, valid):
    """Final stage: gather the per-file reports and validation outputs.

    The inputs arrive as resolved job-based object references from the
    "process" subjobs; they are passed straight through as this job's output.
    """
    return {
        "report": report,
        "validation": valid,
    }
@dxpy.entry_point("process")
def process(file_obj, file_meta):
    """Download one file, run ENCODE validateFiles on it, and upload the report.

    file_obj  -- dxlink/ID of the platform file to validate.
    file_meta -- embedded ENCODE metadata (file_format, assembly, ...).
    Returns a dict with the uploaded "report" file and the "validation" text.
    """
    print(file_obj)
    print(file_meta)
    filename = dxpy.describe(file_obj)['name']
    # Bug fix: rstrip('.gz') strips any trailing '.', 'g' or 'z' characters
    # (mangling names like 'sample.fastq'); remove the suffix only when it is
    # actually present.
    if filename.endswith('.gz'):
        basename = filename[:-len('.gz')]
    else:
        basename = filename
    dx_file = dxpy.download_dxfile(file_obj, filename)
    print("Run Validate Files")
    validate_args = validate_map.get(file_meta['file_format'])
    assembly = file_meta.get('assembly')
    if assembly:
        chromInfo = ['-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)]
    else:
        # Fall back to hg19 when the metadata does not name an assembly.
        chromInfo = ['-chromInfo=%s/hg19/chrom.sizes' % encValData]
    print(subprocess.check_output(['ls', '-l']))
    valid = "Not validated yet"
    if validate_args is not None:
        print("Validating file.")
        validation_command = ['validateFiles'] + ['-verbose=2'] + validate_args + chromInfo + ['-doReport'] + [filename]
        try:
            print(" ".join(validation_command))
            valid = subprocess.check_output(validation_command)
        except subprocess.CalledProcessError as e:
            # validateFiles exits non-zero on invalid files; keep the job
            # alive and surface its output instead of failing.
            print(e.output)
    print(valid)
    print(subprocess.check_output(['ls', '-l']))
    print("Upload result")
    report_dxfile = dxpy.upload_local_file("%s.report" % filename)
    print(report_dxfile)
    ## is_valid == 'Error count 0'
    return {
        "report": report_dxfile,
        "validation": valid
    }
@dxpy.entry_point("main")
def main(files):
    """Fan out one "process" subjob per input file, then aggregate the results.

    For each platform file, the ENCODE accession is parsed from its name,
    the embedded metadata is fetched from the ENCODE portal, and a "process"
    subjob is launched. A final "postprocess" job collects all reports.
    """
    subjobs = []
    # Hoisted out of the loop: ENCODE file accessions look like ENCFF001ABC.
    encff = re.compile('ENCFF[0-9]{3}[A-Z]{3}')
    for file_obj in files:
        filename = dxpy.describe(file_obj)['name']
        match = encff.match(filename)
        # Explicit None check instead of a bare except: that also swallowed
        # KeyboardInterrupt and unrelated errors.
        if match is None:
            print("Filename %s is not an ENCODE file" % filename)
            exit(0)
        file_acc = match.group()
        # SERVER already ends with '/'; avoid producing a double slash.
        file_meta = requests.get(SERVER + file_acc + '/?frame=embedded',
                                 auth=(auth['AUTHID'], auth['AUTHPW']), headers=HEADERS).json()
        subjob_input = {
            "file_obj": file_obj,
            "file_meta": file_meta
        }
        subjobs.append(dxpy.new_dxjob(subjob_input, "process"))
    # The "postprocess" job consumes job-based object references from the
    # "process" subjobs, so the platform will not run it until every subjob
    # has finished; depends_on makes that ordering explicit as well.
    postprocess_job = dxpy.new_dxjob(fn_input={
        "report": [subjob.get_output_ref("report") for subjob in subjobs],
        "valid": [subjob.get_output_ref("validation") for subjob in subjobs]
    },
        fn_name="postprocess",
        depends_on=subjobs)
    # Outputs are job-based object references resolved once postprocess ends.
    validate_reports = [postprocess_job.get_output_ref("report")]
    validations = [postprocess_job.get_output_ref("validation")]
    print(validate_reports)
    print(validations)
    output = {}
    output["validate_reports"] = validate_reports
    output["validate_errors"] = validations
    return output
# Hand control to the DNAnexus job runner, which dispatches to the
# @dxpy.entry_point registered for this job ("main", "process" or "postprocess").
dxpy.run()
|
|
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import functools
import numpy as np
from selfsup.util import DummyDict
from selfsup import ops, caffe
from selfsup.moving_averages import ExponentialMovingAverageExtended
import sys
def _pretrained_resnet_conv_weights_initializer(name, data, info=None, full_info=None, pre_adjust_batch_norm=False, bn_name=None, scale_name=None):
    """Build an initializer for a conv layer from pretrained parameters.

    Returns ``(init, shape)``: when ``data`` holds weights for ``name``,
    ``init`` is a constant initializer and ``shape`` its (h, w, in, out)
    shape; otherwise a variance-scaling initializer with ``shape`` None.
    """
    shape = None
    have_weights = name in data and '0' in data[name]
    if have_weights:
        weights = data[name]['0'].copy()
        # fc1000 is stored as a 2-D matrix; view it as a 1x1 convolution.
        if weights.ndim == 2 and name == 'fc1000':
            weights = weights.reshape((weights.shape[0], -1, 1, 1))
        # Reorder axes to (h, w, in, out) as tf.nn.conv2d expects
        # (presumably from the Caffe (out, in, h, w) layout -- see imports).
        weights = weights.transpose(2, 3, 1, 0)
        init_type = 'file'
        # First layer with 3 input channels: flip channel order (BGR -> RGB).
        if name == 'conv1' and weights.shape[2] == 3:
            weights = weights[:, :, ::-1]
            init_type += ':bgr-flipped'
        init = tf.constant_initializer(weights)
        shape = weights.shape
    else:
        init_type = 'init'
        init = tf.contrib.layers.variance_scaling_initializer()
    if info is not None:
        info[name + '/weights'] = init_type
    return init, shape
def _pretrained_resnet_inner_weights_initializer(name, data, info=DummyDict(), full_info=DummyDict(), pre_adjust_batch_norm=False, bn_name=None, scale_name=None):
    """Build an initializer for a fully-connected layer from pretrained data.

    Returns ``(init, shape, mu, sg)``: ``mu``/``sg`` fold the batch-norm and
    scale layers into an affine transform ``z * sg + mu`` applied after the
    matmul (see resnet_inner).

    Bug fix: the pre-adjust branch referenced ``scale_name``, which was never
    a parameter of this function, so it raised NameError whenever
    ``pre_adjust_batch_norm`` was active; it is now an explicit optional
    parameter (appended with a default, so existing callers are unaffected).
    """
    shape = None
    mu = 0.0
    sg = 1.0
    if name in data and '0' in data[name]:
        W = data[name]['0']
        # Stored as (out, in); transpose for x @ W.
        W = W.T
        init_type = 'file'
        if (pre_adjust_batch_norm and bn_name is not None and bn_name in data
                and scale_name is not None and scale_name in data):
            bn_data = data[bn_name]
            # bn_data['2'] is the normalization factor for the accumulated
            # mean ('0') and variance ('1'); epsilon matches the 1e-5 used
            # by tf.nn.batch_normalization elsewhere in this file.
            bn_sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
            sc_sigma = data[scale_name]['0']
            mu = data[scale_name]['1'] - bn_data['0'] / bn_data['2'] * sc_sigma / bn_sigma
            sg = sc_sigma / bn_sigma
            init_type += ':batch-adjusted'
        init = tf.constant_initializer(W.copy())
        shape = W.shape
    else:
        init_type = 'init'
        init = tf.contrib.layers.variance_scaling_initializer()
    info[name + '/weights'] = init_type
    return init, shape, mu, sg
def _pretrained_resnet_biases_initializer(name, data, info=DummyDict(), full_info=DummyDict(), pre_adjust_batch_norm=False, bn_name=None, scale_name=None):
    """Build a bias initializer from pretrained scale-layer parameters.

    Returns ``(init, shape)``; when ``data`` has no entry for ``name``, a
    zero initializer is returned with ``shape`` None.
    """
    shape = None
    if not (name in data and '1' in data[name]):
        info[name + '/biases'] = 'init'
        return tf.constant_initializer(0.0), shape
    init_type = 'file'
    sc_sigma = data[name]['0'].copy()
    sc_bias = data[name]['1'].copy()
    if pre_adjust_batch_norm and bn_name is not None and bn_name in data:
        bn_data = data[bn_name]
        bn_sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
        mu = bn_data['0'] / bn_data['2']
        # NOTE(review): the adjusted bias is deliberately zeroed here; the
        # commented-out variants in history subtracted the mean instead.
        sc_bias = np.zeros_like(mu)
        init_type += ':batch-adjusted'
    init = tf.constant_initializer(sc_bias)
    shape = sc_bias.shape
    info[name + '/biases'] = init_type
    return init, shape
def resnet_conv(x, channels, size=3, padding='SAME', stride=1, batch_norm=False,
                phase_test=None, activation=tf.nn.relu, name=None,
                parameter_name=None, bn_name=None, scale_name=None, summarize_scale=False,
                info=DummyDict(), parameters={},
                pre_adjust_batch_norm=False, iteration=None):
    """Convolution + batch normalization (+ activation) with pretrained init.

    The convolution is computed at stride 1 and then subsampled with
    tf.strided_slice when ``stride > 1``. Batch normalization is ALWAYS
    applied (the ``batch_norm`` flag is ignored, as in the original code);
    running statistics are tracked with an exponential moving average and
    selected by ``phase_test``.

    Bug fix: the subsampling step was hard-coded to [1, 2, 2, 1]; it now
    honors the requested ``stride``, matching resnet_atrous_conv.
    """
    if parameter_name is None:
        parameter_name = name
    if scale_name is None:
        scale_name = parameter_name
    with tf.name_scope(name):
        features = int(x.get_shape()[3])
        f = channels
        shape = [size, size, features, f]
        W_init, W_shape = _pretrained_resnet_conv_weights_initializer(parameter_name, parameters,
                                                                      info=info.get('init'),
                                                                      full_info=info)
        assert W_shape is None or tuple(W_shape) == tuple(shape), "Incorrect weights shape for {} (file: {}, spec: {})".format(name, W_shape, shape)
        with tf.variable_scope(name):
            W = tf.get_variable('weights', shape, dtype=tf.float32,
                                initializer=W_init)
        raw_conv0 = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
        if stride > 1:
            # Subsample the full-resolution convolution output.
            conv0 = tf.strided_slice(raw_conv0, [0, 0, 0, 0], raw_conv0.get_shape(),
                                     [1, stride, stride, 1])
        else:
            conv0 = raw_conv0
        z = conv0
        # --- batch normalization (unconditional; was `if True:`) ---
        assert phase_test is not None, "phase_test required for batch norm"
        if bn_name in parameters:
            bn_data = parameters[bn_name]
            # '0'/'1' are accumulated mean/variance, '2' the scale factor.
            bn_mean = bn_data['0'] / bn_data['2']
            bn_var = bn_data['1'] / bn_data['2']
        else:
            bn_mean = np.zeros(f, dtype=np.float32)
            bn_var = np.full(f, 0.5, dtype=np.float32)  # a bit strange, but we don't know
        if scale_name in parameters:
            mu = parameters[scale_name]['1']
            sg = parameters[scale_name]['0']
        else:
            mu = np.zeros(f, dtype=np.float32)
            sg = np.ones(f, dtype=np.float32)
        mm, vv = tf.nn.moments(z, [0, 1, 2], name='mommy')
        assert mu.size == f
        assert sg.size == f
        # beta/gamma start from the pretrained scale layer and stay trainable.
        beta = tf.Variable(tf.constant(mu, shape=[f]), name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(sg, shape=[f]), name='gamma', trainable=True)
        ema = ExponentialMovingAverageExtended(decay=0.999, value=[bn_mean, bn_var],
                                               num_updates=iteration)

        def mean_var_train():
            # Update the running statistics, then use the batch statistics.
            ema_apply_op = ema.apply([mm, vv])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(mm), tf.identity(vv)

        def mean_var_test():
            return ema.average(mm), ema.average(vv)

        mean, var = tf.cond(~phase_test,
                            mean_var_train,
                            mean_var_test)
        info['activations']['last_mean'] = mean
        info['activations']['last_var'] = var
        z = tf.nn.batch_normalization(z, mean, var, beta, gamma, 1e-5)
        info['activations']['preact_' + name] = z
        if activation is not None:
            z = activation(z)
        if info.get('scale_summary'):
            with tf.name_scope('activation'):
                tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
        info['activations'][name] = z
        if 'weights' in info:
            info['weights'][name + ':weights'] = W
        return z
def resnet_atrous_conv(x, channels, size=3, padding='SAME', stride=1, hole=1, batch_norm=False,
                       phase_test=None, activation=tf.nn.relu, name=None,
                       parameter_name=None, bn_name=None, scale_name=None, summarize_scale=False, info=DummyDict(), parameters={},
                       pre_adjust_batch_norm=False):
    """Convolution with an optional atrous (dilated) kernel.

    Unlike resnet_conv, no batch-norm ops are built here: when
    ``pre_adjust_batch_norm`` is set, the batch-norm and scale parameters
    are folded into the weight/bias initializers instead.
    ``hole`` is the dilation rate; ``hole > 1`` requires ``stride == 1``.
    """
    if parameter_name is None:
        parameter_name = name
    if scale_name is None:
        scale_name = parameter_name
    with tf.name_scope(name):
        features = int(x.get_shape()[3])
        f = channels
        shape = [size, size, features, f]
        W_init, W_shape = _pretrained_resnet_conv_weights_initializer(parameter_name, parameters,
                                                                      info=info.get('init'),
                                                                      pre_adjust_batch_norm=pre_adjust_batch_norm,
                                                                      bn_name=bn_name, scale_name=scale_name)
        # Biases come from the scale layer (folded with batch norm when enabled).
        b_init, b_shape = _pretrained_resnet_biases_initializer(scale_name, parameters,
                                                                info=info.get('init'),
                                                                pre_adjust_batch_norm=pre_adjust_batch_norm,
                                                                bn_name=bn_name)
        assert W_shape is None or tuple(W_shape) == tuple(shape), "Incorrect weights shape for {} (file: {}, spec: {})".format(name, W_shape, shape)
        assert b_shape is None or tuple(b_shape) == (f,), "Incorrect bias shape for {} (file: {}, spec; {})".format(name, b_shape, (f,))
        with tf.variable_scope(name):
            W = tf.get_variable('weights', shape, dtype=tf.float32,
                                initializer=W_init)
            b = tf.get_variable('biases', [f], dtype=tf.float32,
                                initializer=b_init)
        if hole == 1:
            raw_conv0 = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
        else:
            # Dilated convolution; combining dilation with striding is not supported.
            assert stride == 1
            raw_conv0 = tf.nn.atrous_conv2d(x, W, rate=hole, padding=padding)
        if stride > 1:
            # Subsample the full-resolution convolution output.
            conv0 = tf.strided_slice(raw_conv0, [0, 0, 0, 0], raw_conv0.get_shape(), [1, stride, stride, 1])
        else:
            conv0 = raw_conv0
        # bias_add loses static shape info here; reshape restores it.
        h1 = tf.reshape(tf.nn.bias_add(conv0, b), conv0.get_shape())
        z = h1
        if activation is not None:
            z = activation(z)
        if info.get('scale_summary'):
            with tf.name_scope('activation'):
                tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
        info['activations'][name] = z
        return z
def resnet_inner(x, channels, info=DummyDict(), stddev=None,
                 activation=tf.nn.relu, name=None, parameters={},
                 parameter_name=None):
    """Fully-connected layer whose pretrained bias/scale is applied as an
    affine transform ``z * sg + mu`` after the matmul (no bias variable)."""
    if parameter_name is None:
        parameter_name = name
    with tf.name_scope(name):
        f = channels
        features = np.prod(x.get_shape().as_list()[1:])
        xflat = tf.reshape(x, [-1, features])
        shape = [features, channels]
        W_init, W_shape, mu, sg = _pretrained_resnet_inner_weights_initializer(
            parameter_name, parameters, info=info.get('init'))
        b_init, b_shape = _pretrained_resnet_biases_initializer(
            parameter_name, parameters, info=info.get('init'))
        assert W_shape is None or tuple(W_shape) == tuple(shape), "Incorrect weights shape for {} (file: {}, spec: {})".format(name, W_shape, shape)
        assert b_shape is None or tuple(b_shape) == (f,), "Incorrect bias shape for {} (file: {}, spec; {})".format(name, b_shape, (f,))
        with tf.variable_scope(name):
            W = tf.get_variable('weights', shape, dtype=tf.float32,
                                initializer=W_init)
        z = tf.matmul(xflat, W) * sg + mu
        if activation is not None:
            z = activation(z)
        info['activations'][name] = z
        if info.get('scale_summary'):
            with tf.name_scope('activation'):
                tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
        return z
def build_network(x, info=DummyDict(), parameters={},
                  phase_test=None, convolutional=False, final_layer=True,
                  pre_adjust_batch_norm=False,
                  num_features_mult=1.0, iteration=None):
    """Build a ResNet-152-style graph (res2a..res5c) from pretrained parameters.

    Layer/parameter names follow the Caffe prototxt (the ':call ...' strings
    below are the original generator spec, kept for reference).
    Intermediate tensors are recorded in info['activations'].
    """
    # Layer helper shortcuts (NOTE: the original comment said "VGG-16" --
    # this function actually builds a ResNet).
    conv = functools.partial(resnet_conv, size=3, parameters=parameters,
                             info=info, pre_adjust_batch_norm=pre_adjust_batch_norm,
                             phase_test=phase_test, iteration=iteration)
    pool = functools.partial(ops.max_pool, info=info)
    avg_pool = functools.partial(ops.avg_pool, info=info)
    dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)

    def num(f):
        # Scale every channel count by num_features_mult (truncated to int).
        return int(f * num_features_mult)

    z = x
    conv1 = conv(z, num(64), size=7, stride=2, name='conv1', bn_name='bn_conv1',
                 scale_name='scale_conv1')
    pool1 = pool(conv1, 3, stride=2, name='pool1')
    # First residual unit: projection shortcut (branch1) + bottleneck (branch2).
    res2a_branch1 = conv(pool1, num(256), size=1, name='res2a_branch1', bn_name='bn2a_branch1',
                         scale_name='scale2a_branch1', activation=None)
    res2a_branch2a = conv(pool1, num(64), size=1, name='res2a_branch2a', bn_name='bn2a_branch2a',
                          scale_name='scale2a_branch2a')
    res2a_branch2b = conv(res2a_branch2a, num(64), size=3, name='res2a_branch2b', bn_name='bn2a_branch2b',
                          scale_name='scale2a_branch2b')
    res2a_branch2c = conv(res2a_branch2b, num(256), size=1, name='res2a_branch2c', bn_name='bn2a_branch2c',
                          scale_name='scale2a_branch2c', activation=None)
    res2a_preact = tf.add(res2a_branch1, res2a_branch2c)
    info['activations']['preact_res2a'] = res2a_preact
    res2a = tf.nn.relu(res2a_preact, name='res2a')
    info['activations']['res2a'] = res2a
    # ---
    """
    :call nobias-conv 1 0 1 64 res2a res2b_branch2a
    :call batch-norm res2b_branch2a bn2b_branch2a
    :call bias res2b_branch2a scale2b_branch2a
    :call relu res2b_branch2a
    :#
    :call nobias-conv 3 1 1 64 res2b_branch2a res2b_branch2b
    :call batch-norm res2b_branch2b bn2b_branch2b
    :call bias res2b_branch2b scale2b_branch2b
    :call relu res2b_branch2b
    :#
    :call nobias-conv 1 0 1 256 res2b_branch2b res2b_branch2c
    :call batch-norm res2b_branch2c bn2b_branch2c
    :call bias res2b_branch2c scale2b_branch2c
    :call add res2a res2b_branch2c res2b
    :call relu res2b
    """
    def block(x, ch1, ch2, b):
        # Identity-shortcut bottleneck unit: 1x1 (ch1) -> 3x3 (ch1) -> 1x1 (ch2).
        output = 'res{}'.format(b)
        branch2a = conv(x, num(ch1), size=1, name='res{}_branch2a'.format(b),
                        bn_name='bn{}_branch2a'.format(b),
                        scale_name='scale{}_branch2a'.format(b))
        branch2b = conv(branch2a, num(ch1), size=3, name='res{}_branch2b'.format(b),
                        bn_name='bn{}_branch2b'.format(b),
                        scale_name='scale{}_branch2b'.format(b))
        branch2c = conv(branch2b, num(ch2), size=1, name='res{}_branch2c'.format(b),
                        bn_name='bn{}_branch2c'.format(b),
                        scale_name='scale{}_branch2c'.format(b), activation=None)
        z0 = tf.add(x, branch2c)
        info['activations']['preact_' + output] = z0
        z = tf.nn.relu(z0, name=output)
        info['activations'][output] = z
        return z
    """
    :call nobias-conv 1 0 2 ${ch2} res${a} res${b}_branch1
    :call batch-norm res${b}_branch1 bn${b}_branch1
    :call bias res${b}_branch1 scale${b}_branch1
    :#
    :call nobias-conv 1 0 2 ${ch1} res${a} res${b}_branch2a
    :call batch-norm res${b}_branch2a bn${b}_branch2a
    :call bias res${b}_branch2a scale${b}_branch2a
    :call relu res${b}_branch2a
    :#
    :call nobias-conv 3 1 1 ${ch1} res${b}_branch2a res${b}_branch2b
    :call batch-norm res${b}_branch2b bn${b}_branch2b
    :call bias res${b}_branch2b scale${b}_branch2b
    :call relu res${b}_branch2b
    :#
    :call nobias-conv 1 0 1 ${ch2} res${b}_branch2b res${b}_branch2c
    :call batch-norm res${b}_branch2c bn${b}_branch2c
    :call bias res${b}_branch2c scale${b}_branch2c
    :call add res${b}_branch1 res${b}_branch2c res${b}
    :call relu res${b}
    """
    def block_reduce(x, ch1, ch2, b, stride=2):
        # Downsampling bottleneck unit with a strided projection shortcut.
        output = 'res{}'.format(b)
        branch1 = conv(x, num(ch2), size=1, stride=stride,
                       name='res{}_branch1'.format(b),
                       bn_name='bn{}_branch1'.format(b),
                       scale_name='scale{}_branch1'.format(b),
                       activation=None)
        branch2a = conv(x, num(ch1), size=1, stride=stride,
                        name='res{}_branch2a'.format(b),
                        bn_name='bn{}_branch2a'.format(b),
                        scale_name='scale{}_branch2a'.format(b))
        branch2b = conv(branch2a, num(ch1), size=3,
                        name='res{}_branch2b'.format(b),
                        bn_name='bn{}_branch2b'.format(b),
                        scale_name='scale{}_branch2b'.format(b))
        branch2c = conv(branch2b, num(ch2), size=1,
                        name='res{}_branch2c'.format(b),
                        bn_name='bn{}_branch2c'.format(b),
                        scale_name='scale{}_branch2c'.format(b), activation=None)
        z0 = tf.add(branch1, branch2c)
        info['activations']['preact_' + output] = z0
        z = tf.nn.relu(z0, name=output)
        #z = tf.nn.relu(tf.add(branch1, branch2c), name=output)
        info['activations'][output] = z
        return z
    res2b = block(res2a, 64, 256, '2b')
    res2c = block(res2b, 64, 256, '2c')
    res3a = block_reduce(res2c, 128, 512, '3a')
    """
    :call resnet 128 512 3a 3b1
    :call resnet 128 512 3b1 3b2
    :call resnet 128 512 3b2 3b3
    :call resnet 128 512 3b3 3b4
    :call resnet 128 512 3b4 3b5
    :call resnet 128 512 3b5 3b6
    :call resnet 128 512 3b6 3b7
    """
    res3b1 = block(res3a, 128, 512, '3b1')
    res3b2 = block(res3b1, 128, 512, '3b2')
    res3b3 = block(res3b2, 128, 512, '3b3')
    res3b4 = block(res3b3, 128, 512, '3b4')
    res3b5 = block(res3b4, 128, 512, '3b5')
    res3b6 = block(res3b5, 128, 512, '3b6')
    res3b7 = block(res3b6, 128, 512, '3b7')
    """
    :call resnet-reduce 256 1024 3b7 4a
    """
    res4a = block_reduce(res3b7, 256, 1024, '4a')
    """
    :call resnet 256 1024 4a 4b1
    :call resnet 256 1024 4b1 4b2
    :call resnet 256 1024 4b2 4b3
    :call resnet 256 1024 4b3 4b4
    :call resnet 256 1024 4b4 4b5
    :call resnet 256 1024 4b5 4b6
    :call resnet 256 1024 4b6 4b7
    :call resnet 256 1024 4b7 4b8
    :call resnet 256 1024 4b8 4b9
    :call resnet 256 1024 4b9 4b10
    :call resnet 256 1024 4b10 4b11
    :call resnet 256 1024 4b11 4b12
    :call resnet 256 1024 4b12 4b13
    :call resnet 256 1024 4b13 4b14
    :call resnet 256 1024 4b14 4b15
    :call resnet 256 1024 4b15 4b16
    :call resnet 256 1024 4b16 4b17
    :call resnet 256 1024 4b17 4b18
    :call resnet 256 1024 4b18 4b19
    :call resnet 256 1024 4b19 4b20
    :call resnet 256 1024 4b20 4b21
    :call resnet 256 1024 4b21 4b22
    :call resnet 256 1024 4b22 4b23
    :call resnet 256 1024 4b23 4b24
    :call resnet 256 1024 4b24 4b25
    :call resnet 256 1024 4b25 4b26
    :call resnet 256 1024 4b26 4b27
    :call resnet 256 1024 4b27 4b28
    :call resnet 256 1024 4b28 4b29
    :call resnet 256 1024 4b29 4b30
    :call resnet 256 1024 4b30 4b31
    :call resnet 256 1024 4b31 4b32
    :call resnet 256 1024 4b32 4b33
    :call resnet 256 1024 4b33 4b34
    :call resnet 256 1024 4b34 4b35
    """
    res4b1 = block(res4a, 256, 1024, '4b1')
    res4b2 = block(res4b1, 256, 1024, '4b2')
    res4b3 = block(res4b2, 256, 1024, '4b3')
    res4b4 = block(res4b3, 256, 1024, '4b4')
    res4b5 = block(res4b4, 256, 1024, '4b5')
    res4b6 = block(res4b5, 256, 1024, '4b6')
    res4b7 = block(res4b6, 256, 1024, '4b7')
    res4b8 = block(res4b7, 256, 1024, '4b8')
    res4b9 = block(res4b8, 256, 1024, '4b9')
    res4b10 = block(res4b9, 256, 1024, '4b10')
    res4b11 = block(res4b10, 256, 1024, '4b11')
    res4b12 = block(res4b11, 256, 1024, '4b12')
    res4b13 = block(res4b12, 256, 1024, '4b13')
    res4b14 = block(res4b13, 256, 1024, '4b14')
    res4b15 = block(res4b14, 256, 1024, '4b15')
    res4b16 = block(res4b15, 256, 1024, '4b16')
    res4b17 = block(res4b16, 256, 1024, '4b17')
    res4b18 = block(res4b17, 256, 1024, '4b18')
    res4b19 = block(res4b18, 256, 1024, '4b19')
    res4b20 = block(res4b19, 256, 1024, '4b20')
    res4b21 = block(res4b20, 256, 1024, '4b21')
    res4b22 = block(res4b21, 256, 1024, '4b22')
    res4b23 = block(res4b22, 256, 1024, '4b23')
    res4b24 = block(res4b23, 256, 1024, '4b24')
    res4b25 = block(res4b24, 256, 1024, '4b25')
    res4b26 = block(res4b25, 256, 1024, '4b26')
    res4b27 = block(res4b26, 256, 1024, '4b27')
    res4b28 = block(res4b27, 256, 1024, '4b28')
    res4b29 = block(res4b28, 256, 1024, '4b29')
    res4b30 = block(res4b29, 256, 1024, '4b30')
    res4b31 = block(res4b30, 256, 1024, '4b31')
    res4b32 = block(res4b31, 256, 1024, '4b32')
    res4b33 = block(res4b32, 256, 1024, '4b33')
    res4b34 = block(res4b33, 256, 1024, '4b34')
    res4b35 = block(res4b34, 256, 1024, '4b35')
    """
    :call resnet-reduce 512 2048 4b35 5a
    """
    res5a = block_reduce(res4b35, 512, 2048, '5a')
    """
    :call resnet 512 2048 5a 5b
    :call resnet 512 2048 5b 5c
    """
    res5b = block(res5a, 512, 2048, '5b')
    res5c = block(res5b, 512, 2048, '5c')
    """
    layer {
        bottom: "res5c"
        top: "pool5"
        name: "pool5"
        type: "Pooling"
        pooling_param {
            kernel_size: 7
            stride: 1
            pool: AVE
        }
    }
    """
    if final_layer:
        # Global 7x7 average pool, then the 1000-way classifier (as a 1x1
        # conv when `convolutional`, else a fully-connected layer).
        pool5 = avg_pool(res5c, 7, stride=1, name='pool5', padding='VALID')
        if convolutional:
            z = conv(pool5, 1000, size=1, name='fc1000', activation=None)
        else:
            z = resnet_inner(pool5, 1000, info=info, parameters=parameters, activation=None, name='fc1000')
    else:
        z = res5c
    return z
def build_network_atrous2(x, info=DummyDict(), parameters={},
                          phase_test=None, convolutional=False, final_layer=True,
                          pre_adjust_batch_norm=False):
    """Build the same ResNet, but with stage 5 at dilation rate 2.

    The res5a reduction keeps stride 1 and res5b/res5c use atrous (hole=2)
    convolutions, so the output feature map is 2x denser than in
    build_network -- the usual trick for dense-prediction backbones.
    Batch norm is folded into the initializers (resnet_atrous_conv) instead
    of being built as graph ops.
    """
    # Layer helper shortcuts (NOTE: the original comment said "VGG-16" --
    # this function actually builds a ResNet).
    conv = functools.partial(resnet_conv, size=3, parameters=parameters,
                             info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
    aconv = functools.partial(resnet_atrous_conv, size=3, parameters=parameters,
                              info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
    pool = functools.partial(ops.max_pool, info=info)
    avg_pool = functools.partial(ops.avg_pool, info=info)
    dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)
    z = x
    conv1 = conv(z, 64, size=7, stride=2, name='conv1', bn_name='bn_conv1',
                 scale_name='scale_conv1')
    pool1 = pool(conv1, 3, stride=2, name='pool1')
    # First residual unit: projection shortcut (branch1) + bottleneck (branch2).
    res2a_branch1 = conv(pool1, 256, size=1, name='res2a_branch1', bn_name='bn2a_branch1',
                         scale_name='scale2a_branch1', activation=None)
    res2a_branch2a = conv(pool1, 64, size=1, name='res2a_branch2a', bn_name='bn2a_branch2a',
                          scale_name='scale2a_branch2a')
    res2a_branch2b = conv(res2a_branch2a, 64, size=3, name='res2a_branch2b', bn_name='bn2a_branch2b',
                          scale_name='scale2a_branch2b')
    res2a_branch2c = conv(res2a_branch2b, 256, size=1, name='res2a_branch2c', bn_name='bn2a_branch2c',
                          scale_name='scale2a_branch2c', activation=None)
    res2a = tf.nn.relu(tf.add(res2a_branch1, res2a_branch2c), name='res2a')
    info['activations']['res2a'] = res2a
    # ---
    """
    :call nobias-conv 1 0 1 64 res2a res2b_branch2a
    :call batch-norm res2b_branch2a bn2b_branch2a
    :call bias res2b_branch2a scale2b_branch2a
    :call relu res2b_branch2a
    :#
    :call nobias-conv 3 1 1 64 res2b_branch2a res2b_branch2b
    :call batch-norm res2b_branch2b bn2b_branch2b
    :call bias res2b_branch2b scale2b_branch2b
    :call relu res2b_branch2b
    :#
    :call nobias-conv 1 0 1 256 res2b_branch2b res2b_branch2c
    :call batch-norm res2b_branch2c bn2b_branch2c
    :call bias res2b_branch2c scale2b_branch2c
    :call add res2a res2b_branch2c res2b
    :call relu res2b
    """
    def block(x, ch1, ch2, b, hole=1):
        # Identity-shortcut bottleneck unit built from atrous convolutions.
        output = 'res{}'.format(b)
        branch2a = aconv(x, ch1, size=1, hole=hole, name='res{}_branch2a'.format(b),
                         bn_name='bn{}_branch2a'.format(b),
                         scale_name='scale{}_branch2a'.format(b))
        branch2b = aconv(branch2a, ch1, size=3, hole=hole, name='res{}_branch2b'.format(b),
                         bn_name='bn{}_branch2b'.format(b),
                         scale_name='scale{}_branch2b'.format(b))
        branch2c = aconv(branch2b, ch2, size=1, hole=hole, name='res{}_branch2c'.format(b),
                         bn_name='bn{}_branch2c'.format(b),
                         scale_name='scale{}_branch2c'.format(b), activation=None)
        z = tf.nn.relu(tf.add(x, branch2c), name=output)
        info['activations'][output] = z
        return z
    """
    :call nobias-conv 1 0 2 ${ch2} res${a} res${b}_branch1
    :call batch-norm res${b}_branch1 bn${b}_branch1
    :call bias res${b}_branch1 scale${b}_branch1
    :#
    :call nobias-conv 1 0 2 ${ch1} res${a} res${b}_branch2a
    :call batch-norm res${b}_branch2a bn${b}_branch2a
    :call bias res${b}_branch2a scale${b}_branch2a
    :call relu res${b}_branch2a
    :#
    :call nobias-conv 3 1 1 ${ch1} res${b}_branch2a res${b}_branch2b
    :call batch-norm res${b}_branch2b bn${b}_branch2b
    :call bias res${b}_branch2b scale${b}_branch2b
    :call relu res${b}_branch2b
    :#
    :call nobias-conv 1 0 1 ${ch2} res${b}_branch2b res${b}_branch2c
    :call batch-norm res${b}_branch2c bn${b}_branch2c
    :call bias res${b}_branch2c scale${b}_branch2c
    :call add res${b}_branch1 res${b}_branch2c res${b}
    :call relu res${b}
    """
    def block_reduce(x, ch1, ch2, b, stride=2):
        # Downsampling bottleneck unit with a strided projection shortcut.
        output = 'res{}'.format(b)
        branch1 = conv(x, ch2, size=1, stride=stride,
                       name='res{}_branch1'.format(b),
                       bn_name='bn{}_branch1'.format(b),
                       scale_name='scale{}_branch1'.format(b),
                       activation=None)
        branch2a = conv(x, ch1, size=1, stride=stride,
                        name='res{}_branch2a'.format(b),
                        bn_name='bn{}_branch2a'.format(b),
                        scale_name='scale{}_branch2a'.format(b))
        branch2b = conv(branch2a, ch1, size=3,
                        name='res{}_branch2b'.format(b),
                        bn_name='bn{}_branch2b'.format(b),
                        scale_name='scale{}_branch2b'.format(b))
        branch2c = conv(branch2b, ch2, size=1,
                        name='res{}_branch2c'.format(b),
                        bn_name='bn{}_branch2c'.format(b),
                        scale_name='scale{}_branch2c'.format(b), activation=None)
        z = tf.nn.relu(tf.add(branch1, branch2c), name=output)
        info['activations'][output] = z
        return z
    res2b = block(res2a, 64, 256, '2b')
    res2c = block(res2b, 64, 256, '2c')
    res3a = block_reduce(res2c, 128, 512, '3a')
    """
    :call resnet 128 512 3a 3b1
    :call resnet 128 512 3b1 3b2
    :call resnet 128 512 3b2 3b3
    :call resnet 128 512 3b3 3b4
    :call resnet 128 512 3b4 3b5
    :call resnet 128 512 3b5 3b6
    :call resnet 128 512 3b6 3b7
    """
    res3b1 = block(res3a, 128, 512, '3b1')
    res3b2 = block(res3b1, 128, 512, '3b2')
    res3b3 = block(res3b2, 128, 512, '3b3')
    res3b4 = block(res3b3, 128, 512, '3b4')
    res3b5 = block(res3b4, 128, 512, '3b5')
    res3b6 = block(res3b5, 128, 512, '3b6')
    res3b7 = block(res3b6, 128, 512, '3b7')
    """
    :call resnet-reduce 256 1024 3b7 4a
    """
    res4a = block_reduce(res3b7, 256, 1024, '4a')
    """
    :call resnet 256 1024 4a 4b1
    :call resnet 256 1024 4b1 4b2
    :call resnet 256 1024 4b2 4b3
    :call resnet 256 1024 4b3 4b4
    :call resnet 256 1024 4b4 4b5
    :call resnet 256 1024 4b5 4b6
    :call resnet 256 1024 4b6 4b7
    :call resnet 256 1024 4b7 4b8
    :call resnet 256 1024 4b8 4b9
    :call resnet 256 1024 4b9 4b10
    :call resnet 256 1024 4b10 4b11
    :call resnet 256 1024 4b11 4b12
    :call resnet 256 1024 4b12 4b13
    :call resnet 256 1024 4b13 4b14
    :call resnet 256 1024 4b14 4b15
    :call resnet 256 1024 4b15 4b16
    :call resnet 256 1024 4b16 4b17
    :call resnet 256 1024 4b17 4b18
    :call resnet 256 1024 4b18 4b19
    :call resnet 256 1024 4b19 4b20
    :call resnet 256 1024 4b20 4b21
    :call resnet 256 1024 4b21 4b22
    :call resnet 256 1024 4b22 4b23
    :call resnet 256 1024 4b23 4b24
    :call resnet 256 1024 4b24 4b25
    :call resnet 256 1024 4b25 4b26
    :call resnet 256 1024 4b26 4b27
    :call resnet 256 1024 4b27 4b28
    :call resnet 256 1024 4b28 4b29
    :call resnet 256 1024 4b29 4b30
    :call resnet 256 1024 4b30 4b31
    :call resnet 256 1024 4b31 4b32
    :call resnet 256 1024 4b32 4b33
    :call resnet 256 1024 4b33 4b34
    :call resnet 256 1024 4b34 4b35
    """
    res4b1 = block(res4a, 256, 1024, '4b1')
    res4b2 = block(res4b1, 256, 1024, '4b2')
    res4b3 = block(res4b2, 256, 1024, '4b3')
    res4b4 = block(res4b3, 256, 1024, '4b4')
    res4b5 = block(res4b4, 256, 1024, '4b5')
    res4b6 = block(res4b5, 256, 1024, '4b6')
    res4b7 = block(res4b6, 256, 1024, '4b7')
    res4b8 = block(res4b7, 256, 1024, '4b8')
    res4b9 = block(res4b8, 256, 1024, '4b9')
    res4b10 = block(res4b9, 256, 1024, '4b10')
    res4b11 = block(res4b10, 256, 1024, '4b11')
    res4b12 = block(res4b11, 256, 1024, '4b12')
    res4b13 = block(res4b12, 256, 1024, '4b13')
    res4b14 = block(res4b13, 256, 1024, '4b14')
    res4b15 = block(res4b14, 256, 1024, '4b15')
    res4b16 = block(res4b15, 256, 1024, '4b16')
    res4b17 = block(res4b16, 256, 1024, '4b17')
    res4b18 = block(res4b17, 256, 1024, '4b18')
    res4b19 = block(res4b18, 256, 1024, '4b19')
    res4b20 = block(res4b19, 256, 1024, '4b20')
    res4b21 = block(res4b20, 256, 1024, '4b21')
    res4b22 = block(res4b21, 256, 1024, '4b22')
    res4b23 = block(res4b22, 256, 1024, '4b23')
    res4b24 = block(res4b23, 256, 1024, '4b24')
    res4b25 = block(res4b24, 256, 1024, '4b25')
    res4b26 = block(res4b25, 256, 1024, '4b26')
    res4b27 = block(res4b26, 256, 1024, '4b27')
    res4b28 = block(res4b27, 256, 1024, '4b28')
    res4b29 = block(res4b28, 256, 1024, '4b29')
    res4b30 = block(res4b29, 256, 1024, '4b30')
    res4b31 = block(res4b30, 256, 1024, '4b31')
    res4b32 = block(res4b31, 256, 1024, '4b32')
    res4b33 = block(res4b32, 256, 1024, '4b33')
    res4b34 = block(res4b33, 256, 1024, '4b34')
    res4b35 = block(res4b34, 256, 1024, '4b35')
    """
    :call resnet-reduce 512 2048 4b35 5a
    """
    # Stride 1 here (instead of 2) keeps stage 5 at the stage-4 resolution;
    # the hole=2 blocks below preserve the receptive field.
    res5a = block_reduce(res4b35, 512, 2048, '5a', stride=1)
    """
    :call resnet 512 2048 5a 5b
    :call resnet 512 2048 5b 5c
    """
    res5b = block(res5a, 512, 2048, '5b', hole=2)
    res5c = block(res5b, 512, 2048, '5c', hole=2)
    """
    layer {
        bottom: "res5c"
        top: "pool5"
        name: "pool5"
        type: "Pooling"
        pooling_param {
            kernel_size: 7
            stride: 1
            pool: AVE
        }
    }
    """
    if final_layer:
        # Global 7x7 average pool, then the 1000-way classifier (as a 1x1
        # conv when `convolutional`, else a fully-connected layer).
        pool5 = avg_pool(res5c, 7, stride=1, name='pool5', padding='VALID')
        if convolutional:
            z = conv(pool5, 1000, size=1, name='fc1000', activation=None)
        else:
            z = resnet_inner(pool5, 1000, info=info, parameters=parameters, activation=None, name='fc1000')
    else:
        z = res5c
    return z
def build_network_atrous4(x, info=None, parameters=None,
                          phase_test=None, convolutional=False, final_layer=True,
                          pre_adjust_batch_norm=False):
    """Build an atrous (dilated) ResNet-152-style network.

    Transcribed from a Caffe prototxt: stages res2/res3 are standard,
    stage res4* uses hole (dilation) 2 and stage res5* uses hole 4 with
    stride 1, so spatial resolution stops shrinking after res3.

    :param x: input image tensor.
    :param info: dict-like collector; every block stores its output under
        ``info['activations'][name]``.  A fresh ``DummyDict`` is created per
        call when omitted.
    :param parameters: dict of pretrained weights forwarded to the conv helpers.
    :param phase_test: placeholder controlling train/test phase (unused here
        beyond being threaded through; dropout is not applied in this network).
    :param convolutional: if True, the classifier is a 1x1 convolution and
        pooling uses 'SAME' padding.
    :param final_layer: if False, skip pooling/classifier and return res5c.
    :param pre_adjust_batch_norm: forwarded to the conv helpers.
    :return: logits tensor (or res5c when ``final_layer`` is False).
    """
    # NOTE(review): the previous mutable defaults (DummyDict() / {}) were
    # evaluated once at definition time and mutated across calls via
    # info['activations']; construct fresh objects per call instead.
    if info is None:
        info = DummyDict()
    if parameters is None:
        parameters = {}
    conv = functools.partial(resnet_conv, size=3, parameters=parameters,
                             info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
    aconv = functools.partial(resnet_atrous_conv, size=3, parameters=parameters,
                              info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
    pool = functools.partial(ops.max_pool, info=info)
    # (unused avg_pool/dropout partials from the original transcription removed)
    z = x
    # Stem: 7x7/2 conv followed by 3x3/2 max pool.
    conv1 = conv(z, 64, size=7, stride=2, name='conv1', bn_name='bn_conv1',
                 scale_name='scale_conv1')
    pool1 = pool(conv1, 3, stride=2, name='pool1')
    # res2a: projection shortcut (branch1) plus 1-3-1 bottleneck (branch2).
    res2a_branch1 = conv(pool1, 256, size=1, name='res2a_branch1', bn_name='bn2a_branch1',
                         scale_name='scale2a_branch1', activation=None)
    res2a_branch2a = conv(pool1, 64, size=1, name='res2a_branch2a', bn_name='bn2a_branch2a',
                          scale_name='scale2a_branch2a')
    res2a_branch2b = conv(res2a_branch2a, 64, size=3, name='res2a_branch2b', bn_name='bn2a_branch2b',
                          scale_name='scale2a_branch2b')
    res2a_branch2c = conv(res2a_branch2b, 256, size=1, name='res2a_branch2c', bn_name='bn2a_branch2c',
                          scale_name='scale2a_branch2c', activation=None)
    res2a = tf.nn.relu(tf.add(res2a_branch1, res2a_branch2c), name='res2a')
    info['activations']['res2a'] = res2a

    def block(x, ch1, ch2, b, hole=1):
        # Identity-shortcut bottleneck: 1x1 (ch1) -> 3x3 atrous (ch1) -> 1x1 (ch2).
        output = 'res{}'.format(b)
        branch2a = conv(x, ch1, size=1, name='res{}_branch2a'.format(b),
                        bn_name='bn{}_branch2a'.format(b),
                        scale_name='scale{}_branch2a'.format(b))
        branch2b = aconv(branch2a, ch1, size=3, hole=hole, name='res{}_branch2b'.format(b),
                         bn_name='bn{}_branch2b'.format(b),
                         scale_name='scale{}_branch2b'.format(b))
        branch2c = conv(branch2b, ch2, size=1, name='res{}_branch2c'.format(b),
                        bn_name='bn{}_branch2c'.format(b),
                        scale_name='scale{}_branch2c'.format(b), activation=None)
        z = tf.nn.relu(tf.add(x, branch2c), name=output)
        info['activations'][output] = z
        return z

    def block_reduce(x, ch1, ch2, b, stride=2, hole=1):
        # Projection-shortcut bottleneck used at the start of each stage.
        output = 'res{}'.format(b)
        branch1 = conv(x, ch2, size=1, stride=stride,
                       name='res{}_branch1'.format(b),
                       bn_name='bn{}_branch1'.format(b),
                       scale_name='scale{}_branch1'.format(b),
                       activation=None)
        branch2a = conv(x, ch1, size=1, stride=stride,
                        name='res{}_branch2a'.format(b),
                        bn_name='bn{}_branch2a'.format(b),
                        scale_name='scale{}_branch2a'.format(b))
        branch2b = aconv(branch2a, ch1, size=3, hole=hole,
                         name='res{}_branch2b'.format(b),
                         bn_name='bn{}_branch2b'.format(b),
                         scale_name='scale{}_branch2b'.format(b))
        branch2c = conv(branch2b, ch2, size=1,
                        name='res{}_branch2c'.format(b),
                        bn_name='bn{}_branch2c'.format(b),
                        scale_name='scale{}_branch2c'.format(b), activation=None)
        z = tf.nn.relu(tf.add(branch1, branch2c), name=output)
        info['activations'][output] = z
        return z

    # Stage 2: res2b, res2c.
    z = block(res2a, 64, 256, '2b')
    z = block(z, 64, 256, '2c')
    # Stage 3: res3a + res3b1..res3b7 (replaces 7 unrolled calls).
    z = block_reduce(z, 128, 512, '3a')
    for i in range(1, 8):
        z = block(z, 128, 512, '3b{}'.format(i))
    # Stage 4: stride 1 with hole 2; res4a + res4b1..res4b35
    # (replaces 35 unrolled calls, identical layer names).
    z = block_reduce(z, 256, 1024, '4a', stride=1, hole=2)
    for i in range(1, 36):
        z = block(z, 256, 1024, '4b{}'.format(i), hole=2)
    # Stage 5: stride 1 with hole 4.
    z = block_reduce(z, 512, 2048, '5a', stride=1, hole=4)
    z = block(z, 512, 2048, '5b', hole=4)
    res5c = block(z, 512, 2048, '5c', hole=4)
    if final_layer:
        # Atrous average pooling to match the dilated feature stride.
        pool5 = ops.atrous_avg_pool(res5c, 7, rate=4, name='pool5',
                                    padding='SAME' if convolutional else 'VALID')
        info['activations']['pool5'] = pool5
        if convolutional:
            z = conv(pool5, 1000, size=1, name='fc1000', activation=None)
        else:
            z = resnet_inner(pool5, 1000, info=info, parameters=parameters,
                             activation=None, name='fc1000')
    else:
        z = res5c
    return z
|
|
#
# Session 5, part 3
#
print("Begin import...")
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
#from skimage import data # ERROR: Cannot load libmkl_def.so
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
print("Loading tensorflow...")
import tensorflow as tf
from libs import utils, gif, datasets, dataset_utils, nb_utils
# dja
plt.style.use('bmh')
#import datetime
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
plt.figure(figsize=(4, 4))
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.filterwarnings("ignore", category=MatplotlibDeprecationWarning)
def wait(n):
  # Pause the interactive plot and then block until the user presses enter.
  # NOTE(review): the requested duration `n` is deliberately ignored -- the
  # original call is commented out below and every wait pauses 3 seconds;
  # confirm this override is still intended.
  #plt.pause(n)
  plt.pause(3)
  input("(press enter)")
##
## Part 3 - Latent-Space Arithmetic
##
#
# Loading the Pre-Trained Model
#
# We're now going to work with a pre-trained VAEGAN model on the
# Celeb Net dataset. Let's load this model:
tf.reset_default_graph()
print("Import vaegan model...")
from libs import celeb_vaegan as CV
net = CV.get_celeb_vaegan_model()
# We'll load the graph_def contained inside this dictionary. It
# follows the same idea as the `inception`, `vgg16`, and `i2v`
# pretrained networks. It is a dictionary with the key `graph_def`
# defined, with the graph's pretrained network. It also includes
# `labels` and a `preprocess` key. We'll have to do one additional
# thing which is to turn off the random sampling from variational
# layer. This isn't really necessary but will ensure we get the same
# results each time we use the network. We'll use the `input_map`
# argument to do this. Don't worry if this doesn't make any sense, as
# we didn't cover the variational layer in any depth. Just know that
# this is removing a random process from the network so that it is
# completely deterministic. If we hadn't done this, we'd get slightly
# different results each time we used the network (which may even be
# desirable for your purposes).
sess = tf.Session()
g = tf.get_default_graph()
print("import graph_def...")
tf.import_graph_def(net['graph_def'], name='net', input_map={
'encoder/variational/random_normal:0': np.zeros(512, dtype=np.float32)})
#for op in g.get_operations():
# print(op.name)
# Now let's get the relevant parts of the network: `X`, the input
# image to the network, `Z`, the input image's encoding, and `G`, the
# decoded image. In many ways, this is just like the Autoencoders we
# learned about in Session 3, except instead of `Y` being the output,
# we have `G` from our generator! And the way we train it is very
# different: we use an adversarial process between the generator and
# discriminator, and use the discriminator's own distance measure to
# help train the network, rather than pixel-to-pixel differences.
X = g.get_tensor_by_name('net/x:0')
Z = g.get_tensor_by_name('net/encoder/variational/z:0')
G = g.get_tensor_by_name('net/generator/x_tilde:0')
# Let's get some data to play with:
files = datasets.CELEB()
#img_i = 50
#img = plt.imread(files[img_i])
#plt.imshow(img)
#plt.title("some celeb")
#wait(1)
# Now preprocess the image, and see what the generated image looks
# like (i.e. the lossy version of the image through the network's
# encoding and decoding).
#p = CV.preprocess(img)
#synth = sess.run(G, feed_dict={X: p[np.newaxis]})
#fig, axs = plt.subplots(1, 2, figsize=(10, 5))
#axs[0].imshow(p)
#plt.imshow(synth[0] / synth.max())
#plt.title("lossy version")
#wait(1)
# So we lost a lot of details but it seems to be able to express
# quite a bit about the image. Our inner most layer, `Z`, is only 512
# values yet our dataset was 200k images of 64 x 64 x 3 pixels (about
# 2.3 GB of information). That means we're able to express our nearly
# 2.3 GB of information with only 512 values! Having some loss of
# detail is certainly expected!
#
# <a name="exploring-the-celeb-net-attributes"></a>
# ## Exploring the Celeb Net Attributes
#
# Let's now try and explore the attributes of our dataset. We didn't
# train the network with any supervised labels, but the Celeb Net
# dataset has 40 attributes for each of its 200k images. These are
# already parsed and stored for you in the `net` dictionary:
print("net keys: ", net.keys())
len(net['labels'])
print("net labels: ", net['labels'])
# Let's see what attributes exist for one of the celeb images:
#plt.title("attributes")
#plt.imshow(img)
#print("attributes of ", img_i)
#[net['labels'][i] for i, attr_i in enumerate(net['attributes'][img_i]) if attr_i]
#for i, attr_i in enumerate(net['attributes'][img_i]):
# if attr_i:
# print(i, net['labels'][i])
#wait(1)
#
# Find the Latent Encoding for an Attribute
#
# The Celeb Dataset includes attributes for each of its 200k+ images.
# This allows us to feed into the encoder some images that we know
# have a *specific* attribute, e.g. "smiling". We store what their
# encoding is and retain this distribution of encoded values. We can
# then look at any other image and see how it is encoded, and
# slightly change the encoding by adding the encoded of our smiling
# images to it! The result should be our image but with more smiling.
# That is just insane and we're going to see how to do it. First lets
# inspect our latent space:
print("Z shape: ", Z.get_shape())
# We have 512 features that we can encode any image with. Assuming
# our network is doing an okay job, let's try to find the `Z` of the
# first 100 images with the 'Bald' attribute:
bald_label = net['labels'].index('Big_Nose')
print("bald_label: ", bald_label)
# Let's get all the bald image indexes:
bald_img_idxs = np.where(net['attributes'][:, bald_label])[0]
print("bald img idxs: ", bald_img_idxs)
print("bald idxs len: ", len(bald_img_idxs))
# Now let's just load 100 of their images:
print("big nose #100: ", bald_img_idxs[99])
bald_imgs = [plt.imread(files[bald_img_i])[..., :3]
for bald_img_i in bald_img_idxs[:100]]
print("bald imgs len: ", len(bald_imgs))
# Let's see if the mean image looks like a good bald person or not:
plt.title("bald person")
plt.imshow(np.mean(bald_imgs, 0).astype(np.uint8))
wait(1)
# Yes that is definitely a bald person. Now we're going to try to
# find the encoding of a bald person. One method is to try and find
# every other possible image and subtract the "bald" person's latent
# encoding. Then we could add this encoding back to any new image and
# hopefully it makes the image look more bald. Or we can find a bunch
# of bald people's encodings and then average their encodings
# together. This should reduce the noise from having many different
# attributes, but keep the signal pertaining to the baldness.
#
# Let's first preprocess the images:
bald_p = np.array([CV.preprocess(bald_img_i) for bald_img_i in bald_imgs])
# Now we can find the latent encoding of the images by calculating
# `Z` and feeding `X` with our `bald_p` images:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
bald_zs = sess.run(Z, feed_dict={X: bald_p}) # dja
# Now let's calculate the mean encoding:
bald_feature = np.mean(bald_zs, 0, keepdims=True)
print("bald feature shape: ", bald_feature.shape)
# Let's try and synthesize from the mean bald feature now and see how
# it looks:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
bald_generated = sess.run(G, feed_dict={Z: bald_feature}) # dja
plt.title("bald generated")
plt.imshow(bald_generated[0] / bald_generated.max())
wait(1)
#
# Latent Feature Arithmetic
#
# Let's now try to write a general function for performing everything
# we've just done so that we can do this with many different
# features. We'll then try to combine them and synthesize people with
# the features we want them to have...
def get_features_for(label='Bald', has_label=True, n_imgs=50):
  """Mean latent encoding of up to `n_imgs` random celeb images that do
  (or, with has_label=False, do not) carry the attribute `label`."""
  attr_col = net['labels'].index(label)
  candidates = np.where(net['attributes'][:, attr_col] == has_label)[0]
  chosen = np.random.permutation(candidates)[:n_imgs]
  # Drop any alpha channel, then run the network's preprocessing.
  batch = np.array([
      CV.preprocess(plt.imread(files[idx])[..., :3]) for idx in chosen
  ])
  encodings = sess.run(Z, feed_dict={X: batch})
  return encodings.mean(axis=0)
# Let's try getting some attributes positive and negative features.
# Be sure to explore different attributes! Also try different values
# of `n_imgs`, e.g. 2, 3, 5, 10, 50, 100. What happens with different
# values?
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# Explore different attributes
z1 = get_features_for('Attractive', True, n_imgs=10)
z2 = get_features_for('Attractive', False, n_imgs=10)
z3 = get_features_for('Chubby', True, n_imgs=10)
z4 = get_features_for('Chubby', False, n_imgs=10)
b1 = sess.run(G, feed_dict={Z: z1[np.newaxis]})
b2 = sess.run(G, feed_dict={Z: z2[np.newaxis]})
b3 = sess.run(G, feed_dict={Z: z3[np.newaxis]})
b4 = sess.run(G, feed_dict={Z: z4[np.newaxis]})
plt.close()
fig, axs = plt.subplots(1, 5, figsize=(9, 4))
plt.suptitle("attract / not attract / chubby / not chubby")
axs[0].imshow(b1[0] / b1.max()), axs[0].grid('off'), axs[0].axis('off')
axs[1].imshow(b2[0] / b2.max()), axs[1].grid('off'), axs[1].axis('off')
axs[2].imshow(b3[0] / b3.max()), axs[2].grid('off'), axs[2].axis('off')
axs[3].imshow(b4[0] / b4.max()), axs[3].grid('off'), axs[3].axis('off')
wait(1)
plt.cla()
# Now let's interpolate between the "Male" and "Not Male" categories:
notmale_vector = z2 - z1
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z1 + notmale_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
plt.suptitle("attract ... not attract")
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# And the same for smiling:
smiling_vector = z3 - z4
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z4 + smiling_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
plt.suptitle("not chubby ... chubby")
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i] / g[i].max(), 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# There's also no reason why we have to be within the boundaries of
# 0-1. We can extrapolate beyond, in, and around the space.
plt.suptitle("extrapolate")
n_imgs = 5
amt = np.linspace(-1.5, 2.5, n_imgs)
zs = np.array([z4 + smiling_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
#ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
#
# Extensions
#
# [Tom White](https://twitter.com/dribnet), Lecturer at Victoria
# University School of Design, also recently demonstrated an
# alternative way of interpolating using a sinusoidal interpolation.
# He's created some of the most impressive generative images out
# there and luckily for us he has detailed his process in the arxiv
# preprint: https://arxiv.org/abs/1609.04468 - as well, be sure to
# check out his twitter bot, https://twitter.com/smilevector - which
# adds smiles to people :) - Note that the network we're using is
# only trained on aligned faces that are frontally facing, though
# this twitter bot is capable of adding smiles to any face. I suspect
# that he is running a face detection algorithm such as AAM, CLM, or
# ASM, cropping the face, aligning it, and then running a similar
# algorithm to what we've done above. Or else, perhaps he has trained
# a new model on faces that are not aligned. In any case, it is well
# worth checking out!
#
# Let's now try and use sinusoidal interpolation using his
# implementation in
# [plat](https://github.com/dribnet/plat/blob/master/plat/interpolate.py#L16-L24)
# which I've copied below:
def slerp(val, low, high):
  """Spherical linear interpolation between vectors `low` and `high`.

  `val` in [0, 1]; 0 returns `low`, 1 returns `high` (values outside the
  range are clamped to the endpoints).  Falls back to ordinary linear
  interpolation when the inputs are (anti)parallel, where the spherical
  formula would divide by zero and produce NaN.
  """
  if val <= 0:
    return low
  if val >= 1:
    return high
  # Clip the cosine into [-1, 1]: floating-point round-off can push the
  # dot product of two unit vectors slightly outside arccos's domain.
  cos_omega = np.clip(
      np.dot(low / np.linalg.norm(low), high / np.linalg.norm(high)),
      -1.0, 1.0)
  omega = np.arccos(cos_omega)
  so = np.sin(omega)
  if np.isclose(so, 0.0):
    # (anti)parallel vectors: slerp degenerates, use lerp instead.
    return (1.0 - val) * low + val * high
  return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high
plt.suptitle("sinusoidal interp")
amt = np.linspace(0, 1, n_imgs)
zs = np.array([slerp(amt_i, z1, z2) for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# It's certainly worth trying especially if you are looking to
# explore your own model's latent space in new and interesting ways.
#
# Let's try and load an image that we want to play with. We need an
# image as similar to the Celeb Dataset as possible. Unfortunately,
# we don't have access to the algorithm they used to "align" the
# faces, so we'll need to try and get as close as possible to an
# aligned face image. One way you can do this is to load up one of
# the celeb images and try and align an image to it using e.g.
# Photoshop or another photo editing software that lets you blend and
# move the images around. That's what I did for my own face...
img = plt.imread('parag.png')[..., :3]
img = CV.preprocess(img, crop_factor=1.0)[np.newaxis]
# Let's see how the network encodes it:
plt.suptitle("blurry Parag")
img_ = sess.run(G, feed_dict={X: img})
#fig, axs = plt.subplots(1, 2, figsize=(10, 5))
plt.cla()
for i, ax_i in enumerate(axs):
ax_i.cla()
ax_i.grid('off')
ax_i.axis('off')
axs[0].imshow(img[0])
axs[1].imshow(np.clip(img_[0] / np.max(img_), 0, 1))
wait(1)
plt.cla()
# Notice how blurry the image is. Tom White's preprint suggests one
# way to sharpen the image is to find the "Blurry" attribute vector:
z1 = get_features_for('Blurry', True, n_imgs=25)
z2 = get_features_for('Blurry', False, n_imgs=25)
unblur_vector = z2 - z1
z = sess.run(Z, feed_dict={X: img})
plt.suptitle("unblur vector")
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i] / g[i].max(), 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# Notice that the image also gets brighter and perhaps other features
# than simply the blurriness of the image changes. Tom's preprint
# suggests that this is due to the correlation that blurred images
# have with other things such as the brightness of the image,
# possibly due to biases in labeling or how photographs are taken. He
# suggests that another way to unblur would be to synthetically blur
# a set of images and find the difference in the encoding between the
# real and blurred images. We can try it like so:
from scipy.ndimage import gaussian_filter
idxs = np.random.permutation(range(len(files)))
imgs = [plt.imread(files[idx_i]) for idx_i in idxs[:100]]
blurred = []
for img_i in imgs:
img_copy = np.zeros_like(img_i)
for ch_i in range(3):
img_copy[..., ch_i] = gaussian_filter(img_i[..., ch_i], sigma=3.0)
blurred.append(img_copy)
# Now let's preprocess the original images and the blurred ones
imgs_p = np.array([CV.preprocess(img_i) for img_i in imgs])
blur_p = np.array([CV.preprocess(img_i) for img_i in blurred])
# And then compute each of their latent features
noblur = sess.run(Z, feed_dict={X: imgs_p})
blur = sess.run(Z, feed_dict={X: blur_p})
synthetic_unblur_vector = np.mean(noblur - blur, 0)
plt.suptitle("synthetic unblur vector")
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + synthetic_unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# For some reason, it also doesn't like my glasses very much. Let's
# try and add them back.
z1 = get_features_for('Eyeglasses', True)
z2 = get_features_for('Eyeglasses', False)
glass_vector = z1 - z2
z = sess.run(Z, feed_dict={X: img})
plt.suptitle("glass vector")
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + glass_vector * amt_i + unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# Well, more like sunglasses then. Let's try adding everything in
# there now!
plt.suptitle("everything")
n_imgs = 5
amt = np.linspace(0, 1.0, n_imgs)
zs = np.array([z[0] + glass_vector * amt_i + unblur_vector * amt_i + amt_i * smiling_vector for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
#fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
ax_i.imshow(np.clip(g[i], 0, 1))
ax_i.grid('off')
ax_i.axis('off')
wait(1)
plt.cla()
# Well it was worth a try anyway. We can also try with a lot of
# images and create a gif montage of the result:
print("creating montage...")
n_imgs = 5
amt = np.linspace(0, 1.5, n_imgs)
z = sess.run(Z, feed_dict={X: imgs_p})
imgs = []
for amt_i in amt:
zs = z + synthetic_unblur_vector * amt_i + amt_i * smiling_vector
g = sess.run(G, feed_dict={Z: zs})
m = utils.montage(np.clip(g, 0, 1))
imgs.append(m)
gif.build_gif(imgs, saveto='celeb_unblur_chubby.gif', interval=0.2)
#ipyd.Image(url='celeb.gif?i={}'.format(np.random.rand()), height=1000, width=1000)
# Exploring multiple feature vectors and applying them to images from
# the celeb dataset to produce animations of a face, saving it as a
# GIF. Recall you can store each image frame in a list and then use
# the `gif.build_gif` function to create a gif. Explore your own
# syntheses and then include a gif of the different images you create
# as "celeb.gif" in the final submission. Perhaps try finding
# unexpected synthetic latent attributes in the same way that we
# created a blur attribute. You can check the documentation in
# scipy.ndimage for some other image processing techniques, for
# instance: http://www.scipy-lectures.org/advanced/image_processing/
# - and see if you can find the encoding of another attribute that
# you then apply to your own images. You can even try it with many
# images and use the `utils.montage` function to create a large grid
# of images that evolves over your attributes. Or create a set of
# expressions perhaps. Up to you just explore!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
#... DO SOMETHING AWESOME ! ... #
#dja
#imgs = []
#gif.build_gif(imgs=imgs, saveto='vaegan.gif')
wait(1)
# Please visit [session-5-part2.ipynb](session-5-part2.ipynb) for the
# rest of the homework!
# eop
|
|
from past.builtins import basestring
import os
import itertools
import builtins
import json
import logging
import warnings
from math import ceil
from contextlib import contextmanager
from django.apps import apps
from django.db import connection
from django.db.migrations.operations.base import Operation
from osf.models.base import generate_object_id
from osf.utils.sanitize import strip_html, unescape_entities
from website import settings
from website.project.metadata.schemas import get_osf_meta_schemas
# Module logger.  Use the module's dotted import path (__name__) rather than
# its file path (__file__) so the logger participates in the standard
# hierarchical logger configuration.
logger = logging.getLogger(__name__)

# presumably a batch size for chunked migration work -- confirm at call sites
increment = 100000

# Dict to map original schema (format, type) pairs to schema block types
FORMAT_TYPE_TO_TYPE_MAP = {
    ('multiselect', 'choose'): 'multi-select-input',
    (None, 'multiselect'): 'multi-select-input',
    (None, 'choose'): 'single-select-input',
    ('osf-upload-open', 'osf-upload'): 'file-input',
    ('osf-upload-toggle', 'osf-upload'): 'file-input',
    ('singleselect', 'choose'): 'single-select-input',
    ('text', 'string'): 'short-text-input',
    ('textarea', 'osf-author-import'): 'contributors-input',
    ('textarea', None): 'long-text-input',
    ('textarea', 'string'): 'long-text-input',
    ('textarea-lg', None): 'long-text-input',
    ('textarea-lg', 'string'): 'long-text-input',
    ('textarea-xl', 'string'): 'long-text-input',
}
def get_osf_models():
    """
    Helper function to retrieve all osf related models.
    Example usage:
        with disable_auto_now_fields(models=get_osf_models()):
            ...
    """
    models = []
    for app_config in apps.get_app_configs():
        # OSF-related apps are the 'osf*' apps plus every 'addons_*' app.
        if app_config.label.startswith('addons_') or app_config.label.startswith('osf'):
            models.extend(app_config.get_models())
    return models
@contextmanager
def disable_auto_now_fields(models=None):
    """
    Context manager to disable auto_now field updates.
    If models=None, updates for all auto_now fields on *all* models will be disabled.
    :param list models: Optional list of models for which auto_now field updates should be disabled.
    """
    targets = models or apps.get_models()
    # Collect every field whose auto_now is currently on, then switch them off.
    toggled = [
        field
        for model in targets
        for field in model._meta.get_fields()
        if getattr(field, 'auto_now', False)
    ]
    for field in toggled:
        field.auto_now = False
    try:
        yield
    finally:
        # Restore auto_now on everything we switched off.
        for field in toggled:
            if not field.auto_now:
                field.auto_now = True
@contextmanager
def disable_auto_now_add_fields(models=None):
    """
    Context manager to disable auto_now_add field updates.
    If models=None, updates for all auto_now_add fields on *all* models will be disabled.
    :param list models: Optional list of models for which auto_now_add field updates should be disabled.
    """
    targets = models or apps.get_models()
    # Collect every field whose auto_now_add is currently on, then switch them off.
    toggled = [
        field
        for model in targets
        for field in model._meta.get_fields()
        if getattr(field, 'auto_now_add', False)
    ]
    for field in toggled:
        field.auto_now_add = False
    try:
        yield
    finally:
        # Restore auto_now_add on everything we switched off.
        for field in toggled:
            if not field.auto_now_add:
                field.auto_now_add = True
def ensure_licenses(*args, **kwargs):
    """Upsert the licenses in our database based on a JSON file.

    Reads the list-of-licenses JSON bundled in node_modules and creates or
    updates a NodeLicense row per entry.

    :return tuple: (number inserted, number updated)

    Moved from website/project/licenses/__init__.py
    """
    ninserted = 0
    nupdated = 0
    try:
        # Inside a migration, args[0] is the historical app registry.
        NodeLicense = args[0].get_model('osf', 'nodelicense')
    except Exception:
        # Working outside a migration
        from osf.models import NodeLicense
    license_path = os.path.join(
        settings.APP_PATH,
        'node_modules', '@centerforopenscience', 'list-of-licenses', 'dist', 'list-of-licenses.json'
    )
    with builtins.open(license_path) as fp:
        licenses = json.loads(fp.read())
    for license_id, info in licenses.items():
        node_license, created = NodeLicense.objects.get_or_create(license_id=license_id)
        node_license.name = info['name']
        node_license.text = info['text']
        node_license.properties = info.get('properties', [])
        node_license.url = info.get('url', '')
        node_license.save()
        if created:
            ninserted += 1
            # BUGFIX: previously this "added" message was logged for
            # updated rows as well; only log it for genuine inserts.
            logger.info('License {name} ({id}) added to the database.'.format(
                name=info['name'], id=license_id))
        else:
            nupdated += 1
    logger.info('{} licenses inserted into the database, {} licenses updated in the database.'.format(
        ninserted, nupdated
    ))
    return ninserted, nupdated
def remove_licenses(*args):
    """Delete every NodeLicense row, logging how many were removed."""
    from osf.models import NodeLicense
    removed = NodeLicense.objects.count()
    NodeLicense.objects.all().delete()
    logger.info('{} licenses removed from the database.'.format(removed))
def ensure_schemas(*args):
    """Import meta-data schemas from JSON into the database if not already loaded."""
    app_state = args[0] if args else apps
    try:
        model = app_state.get_model('osf', 'registrationschema')
    except LookupError:
        # Use MetaSchema model if migrating from a version before RegistrationSchema existed
        model = app_state.get_model('osf', 'metaschema')
    total = 0
    for schema_json in get_osf_meta_schemas():
        _, was_created = model.objects.update_or_create(
            name=schema_json['name'],
            schema_version=schema_json.get('version', 1),
            defaults={
                'schema': schema_json,
            },
        )
        total += 1
        if was_created:
            logger.info('Added schema {} to the database'.format(schema_json['name']))
    logger.info('Ensured {} schemas are in the database'.format(total))
def remove_schemas(*args):
    """Delete every RegistrationSchema row, logging how many were removed."""
    from osf.models import RegistrationSchema
    removed = RegistrationSchema.objects.count()
    RegistrationSchema.objects.all().delete()
    logger.info('Removed {} schemas from the database'.format(removed))
def create_schema_block(state, schema_id, block_type, display_text='', required=False, help_text='',
                        registration_response_key=None, schema_block_group_key='', example_text=''):
    """
    For mapping schemas to schema blocks: creates a given block from the specified parameters
    """
    state = state or apps
    block_model = state.get_model('osf', 'registrationschemablock')

    def _clean(text):
        # Build a fresh `safe` dict per call in case unescape_entities mutates it.
        return unescape_entities(
            text,
            safe={
                '<': '<',
                '>': '>'
            }
        )

    return block_model.objects.create(
        schema_id=schema_id,
        block_type=block_type,
        required=required,
        display_text=_clean(display_text),
        help_text=_clean(help_text),
        registration_response_key=registration_response_key,
        schema_block_group_key=schema_block_group_key,
        example_text=_clean(example_text)
    )
# Split question multiple choice options into their own blocks
def split_options_into_blocks(state, rs, question, schema_block_group_key):
    """
    For mapping schemas to schema blocks: splits individual multiple choice
    options into their own schema blocks
    """
    for option in question.get('options', []):
        # Options are either plain strings or {'text': ..., 'tooltip': ...} dicts.
        if isinstance(option, basestring):
            display, tooltip = option, ''
        else:
            display, tooltip = option.get('text'), option.get('tooltip', '')
        create_schema_block(
            state,
            rs.id,
            'select-input-option',
            display_text=display,
            help_text=tooltip,
            schema_block_group_key=schema_block_group_key,
        )
def get_registration_response_key(question):
    """
    For mapping schemas to schema blocks:
    Answer ids will map to the user's response.
    Prefers the 'qid' key, falling back to 'id', then ''.
    """
    qid = question.get('qid', '')
    return qid if qid else question.get('id', '')
def find_title_description_help_example(rs, question):
    """
    For mapping schemas to schema blocks:
    Schemas are inconsistent with regards to the information going into "title",
    "description", and "help" blocks.
    :returns tuple, title, description, help, example strings
    """
    title = question.get('title', '')
    description = strip_html(question.get('description', ''))
    help_text = strip_html(question.get('help', ''))
    example = strip_html(question.get('example', ''))
    schema_name = rs.schema.get('name', '')
    # Descriptions that contain any of these keywords
    # are turned into help text instead.
    help_text_keywords = (
        'please',
        'choose',
        'provide',
        'format',
        'describe',
        'who',
        'what',
        'when',
        'where',
        'use',
        'you',
        'your',
        'skip',
        'enter',
    )
    if not title:
        # if no title, description text is moved to title.
        title, description = description, ''
    elif schema_name in ['OSF Preregistration', 'Prereg Challenge', 'Secondary Data Preregistration']:
        # These schemas have clear "example" text in the "help" section
        example, help_text, description = help_text, description, ''
    elif any(keyword in description.lower() for keyword in help_text_keywords):
        help_text, description = description, ''
    return title, description, help_text, example
def get_subquestion_qid(question, subquestion):
    """
    For mapping schemas to schema blocks:
    Return a qid in the format "parent-id.current-id", to reflect its nested nature and ensure uniqueness
    """
    parent_key = get_registration_response_key(question) or ''
    return '{}.{}'.format(parent_key, subquestion.get('id', ''))
def create_schema_blocks_for_question(state, rs, question, sub=False):
    """
    For mapping schemas to schema blocks:
    Split the original question from the schema into multiple schema blocks, all of
    which have the same schema_block_group_key, to link them.

    :param state: django apps / migration state used for model lookups
    :param rs: RegistrationSchema (or MetaSchema) instance being migrated
    :param question: question dict from the schema JSON
    :param sub: True when recursing into a subquestion (emits subsection headings)
    """
    # If there are subquestions, recurse and format subquestions
    properties = question.get('properties')
    if properties:
        first_subquestion = properties[0]
        first_subq_text = first_subquestion.get('title') or first_subquestion.get('description', '')
        if first_subq_text:
            # the first subquestion has text, so this seems like an actual [sub]section
            create_schema_block(
                state,
                rs.id,
                block_type='subsection-heading' if sub else 'section-heading',
                display_text=question.get('title', '') or question.get('description', ''),
            )
        else:
            # the first subquestion has no text, so the "section" heading is better interpreted as a question label
            # (push the parent's text down onto the first child instead of emitting a heading)
            first_subquestion['title'] = question.get('title', '')
            first_subquestion['description'] = question.get('description', '')
            if not first_subquestion.get('help'):
                first_subquestion['help'] = question.get('help', '')
        for subquestion in properties:
            # qid becomes "parent-id.child-id" to keep response keys unique
            subquestion['qid'] = get_subquestion_qid(question, subquestion)
            create_schema_blocks_for_question(state, rs, subquestion, sub=True)
    else:
        # All schema blocks related to a particular question share the same schema_block_group_key.
        schema_block_group_key = generate_object_id()
        title, description, help, example = find_title_description_help_example(rs, question)
        # Creates question title block
        create_schema_block(
            state,
            rs.id,
            block_type='question-label',
            display_text=title,
            help_text='' if description else help,
            example_text=example,
            schema_block_group_key=schema_block_group_key
        )
        # Creates paragraph block (question description)
        if description:
            create_schema_block(
                state,
                rs.id,
                block_type='paragraph',
                display_text=description,
                help_text=help,
                schema_block_group_key=schema_block_group_key,
            )
        if question.get('format') or question.get('type'):
            # Creates question input block - this block will correspond to an answer
            # Map the original schema section format to the new block_type, and create a schema block
            block_type = FORMAT_TYPE_TO_TYPE_MAP[(question.get('format'), question.get('type'))]
            create_schema_block(
                state,
                rs.id,
                block_type,
                required=question.get('required', False),
                schema_block_group_key=schema_block_group_key,
                registration_response_key=get_registration_response_key(question)
            )
            # If there are multiple choice answers, create blocks for these as well.
            split_options_into_blocks(state, rs, question, schema_block_group_key)
def create_schema_blocks_for_atomic_schema(schema):
    """
    Atomic schemas are a shortcut around making a typical metaschema: the
    schema JSON is totally explicit about the schemablocks to create, so each
    entry of schema.schema['blocks'] becomes one RegistrationSchemaBlock row.
    """
    from osf.models import RegistrationSchemaBlock
    # Input/option blocks inherit the group key of the most recent question label.
    current_group_key = None
    for index, block in enumerate(schema.schema['blocks']):
        # registration_response_key and schema_block_group_key are unused
        # for most block types and can/should be empty.
        # registration_response_key gets explicitly filtered by isnull :/
        block['registration_response_key'] = None
        block['schema_block_group_key'] = ''
        block_type = block['block_type']
        if block_type == 'question-label':
            # This key will be used by input and option fields for this question
            current_group_key = generate_object_id()
            block['schema_block_group_key'] = current_group_key
        elif block_type in RegistrationSchemaBlock.INPUT_BLOCK_TYPES:
            # Deterministic, unique response key per schema/position
            block['registration_response_key'] = f'{schema.id}-{index}'
            block['schema_block_group_key'] = current_group_key
        elif block_type in ['select-input-option', 'select-input-other']:
            block['schema_block_group_key'] = current_group_key
        RegistrationSchemaBlock.objects.create(
            schema_id=schema.id,
            **block
        )
def map_schemas_to_schemablocks(*args):
    """Map schemas to schema blocks
    WARNING: Deletes existing schema blocks

    :param args: optional migration state as args[0]; falls back to global apps
    """
    state = args[0] if args else apps
    try:
        schema_model = state.get_model('osf', 'registrationschema')
    except LookupError:
        # Use MetaSchema model if migrating from a version before RegistrationSchema existed
        schema_model = state.get_model('osf', 'metaschema')
    # Delete all existing schema blocks (avoid creating duplicates)
    unmap_schemablocks(*args)
    for rs in schema_model.objects.all():
        logger.info('Migrating schema {}, version {} to schema blocks.'.format(rs.name, rs.schema_version))
        if rs.schema.get('atomicSchema'):
            # Atomic schemas list their blocks explicitly; no question parsing needed.
            create_schema_blocks_for_atomic_schema(rs)
            continue
        for page in rs.schema['pages']:
            # Create page heading block
            create_schema_block(
                state,
                rs.id,
                'page-heading',
                display_text=strip_html(page.get('title', '')),
                help_text=strip_html(page.get('description', ''))
            )
            for question in page['questions']:
                create_schema_blocks_for_question(state, rs, question)
def unmap_schemablocks(*args):
    """Delete every RegistrationSchemaBlock row (inverse of the schema-block mapping)."""
    app_state = args[0] if args else apps
    block_model = app_state.get_model('osf', 'registrationschemablock')
    block_model.objects.all().delete()
class UpdateRegistrationSchemas(Operation):
    """Custom migration operation to update registration schemas
    """
    # Declared reversible, but backwards is intentionally a no-op (see below).
    reversible = True
    def state_forwards(self, app_label, state):
        # Data-only operation: no model state changes.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Upsert all schemas using the historical model state.
        ensure_schemas(to_state.apps)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Schemas are upserted in place, so there is nothing meaningful to undo.
        warnings.warn('Reversing UpdateRegistrationSchemas is a noop')
    def describe(self):
        return 'Updated registration schemas'
class UpdateRegistrationSchemasAndSchemaBlocks(Operation):
    """Custom migration operation to update registration schemas
    and rebuild their schema blocks.
    """
    reversible = True
    def state_forwards(self, app_label, state):
        # Data-only operation: no model state changes.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Upsert schemas, then regenerate all schema blocks from them.
        ensure_schemas(to_state.apps)
        map_schemas_to_schemablocks(to_state.apps)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reverse only removes the generated blocks; the schemas themselves stay.
        RegistrationSchemaBlock = to_state.apps.get_model('osf', 'registrationschemablock')
        RegistrationSchemaBlock.objects.all().delete()
    def describe(self):
        return 'Updated registration schemas and its schema blocks'
class AddWaffleFlags(Operation):
    """Custom migration operation to add waffle flags
    Params:
    - flag_names: iterable of strings, flag names to create
    - on_for_everyone: boolean (default False), whether to activate the newly created flags
    """
    reversible = True
    def __init__(self, flag_names, on_for_everyone=False):
        self.flag_names = flag_names
        self.on_for_everyone = on_for_everyone
    def state_forwards(self, app_label, state):
        # Flags are data-only; no model state changes.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        Flag = to_state.apps.get_model('waffle', 'flag')
        for flag_name in self.flag_names:
            # get_or_create keeps the operation idempotent on re-run
            Flag.objects.get_or_create(name=flag_name, defaults={'everyone': self.on_for_everyone})
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        Flag = to_state.apps.get_model('waffle', 'flag')
        Flag.objects.filter(name__in=self.flag_names).delete()
    def describe(self):
        return 'Adds waffle flags: {}'.format(', '.join(self.flag_names))
class DeleteWaffleFlags(Operation):
    """Custom migration operation to delete waffle flags
    Params:
    - flag_names: iterable of strings, flag names to delete
    """
    reversible = True
    def __init__(self, flag_names):
        self.flag_names = flag_names
    def state_forwards(self, app_label, state):
        # Flags are data-only; no model state changes.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        Flag = to_state.apps.get_model('waffle', 'flag')
        Flag.objects.filter(name__in=self.flag_names).delete()
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # NOTE(review): recreated flags lose their original 'everyone' setting
        # (no defaults passed here) -- reverse is only approximate.
        Flag = to_state.apps.get_model('waffle', 'flag')
        for flag_name in self.flag_names:
            Flag.objects.get_or_create(name=flag_name)
    def describe(self):
        return 'Removes waffle flags: {}'.format(', '.join(self.flag_names))
class AddWaffleSwitches(Operation):
    """Custom migration operation to add waffle switches
    Params:
    - switch_names: iterable of strings, the names of the switches to create
    - active: boolean (default False), whether the switches should be active
    """
    reversible = True
    def __init__(self, switch_names, active=False):
        self.switch_names = switch_names
        self.active = active
    def state_forwards(self, app_label, state):
        # Switches are data-only; no model state changes.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        Switch = to_state.apps.get_model('waffle', 'switch')
        for switch in self.switch_names:
            # get_or_create keeps the operation idempotent on re-run
            Switch.objects.get_or_create(name=switch, defaults={'active': self.active})
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        Switch = to_state.apps.get_model('waffle', 'switch')
        Switch.objects.filter(name__in=self.switch_names).delete()
    def describe(self):
        return 'Adds waffle switches: {}'.format(', '.join(self.switch_names))
class DeleteWaffleSwitches(Operation):
    """Custom migration operation to delete waffle switches
    Params:
    - switch_names: iterable of strings, switch names to delete
    """
    reversible = True
    def __init__(self, switch_names):
        self.switch_names = switch_names
    def state_forwards(self, app_label, state):
        # Switches are data-only; no model state changes.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        Switch = to_state.apps.get_model('waffle', 'switch')
        Switch.objects.filter(name__in=self.switch_names).delete()
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # NOTE(review): recreated switches lose their original 'active' setting
        # (no defaults passed here) -- reverse is only approximate.
        Switch = to_state.apps.get_model('waffle', 'switch')
        for switch in self.switch_names:
            Switch.objects.get_or_create(name=switch)
    def describe(self):
        return 'Removes waffle switches: {}'.format(', '.join(self.switch_names))
def batch_node_migrations(state, migrations):
    """Run raw-SQL migrations over AbstractNode ids in fixed-size batches.

    :param state: django apps / migration state used for model lookups
    :param migrations: iterable of dicts with 'description' and 'sql' keys;
        each 'sql' template is formatted with {start}/{end} id bounds
    NOTE(review): relies on module-level ``increment`` (batch size) and
    ``connection`` (DB connection) defined elsewhere in this module.
    """
    AbstractNode = state.get_model('osf', 'abstractnode')
    # Highest node id, or 0 for an empty table (objects.last() returns None).
    max_nid = getattr(AbstractNode.objects.last(), 'id', 0)
    for migration in migrations:
        total_pages = int(ceil(max_nid / float(increment)))
        page_start = 0
        page_end = 0
        page = 0
        logger.info('{}'.format(migration['description']))
        while page_end <= (max_nid):
            page += 1
            page_end += increment
            if page <= total_pages:
                # BUG FIX: use the integer page counter. `page_end / increment`
                # is float division on Python 3 and logged pages like "1.0 / 5".
                logger.info('Updating page {} / {}'.format(page, total_pages))
                with connection.cursor() as cursor:
                    cursor.execute(migration['sql'].format(
                        start=page_start,
                        end=page_end
                    ))
            page_start = page_end
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#print("Content-type:text/html\r\n\r\n")
from six import iteritems
import cgi, cgitb
import os, shutil
import sys, traceback
from modules.logintools import login
import urllib
from modules.gitdox_sql import *
from modules.gitdox_git import *
from modules.configobj import ConfigObj
import requests
from requests.auth import HTTPBasicAuth
import platform, re
from paths import ether_url, get_menu, get_nlp_credentials
from modules.ether import make_spreadsheet, delete_spreadsheet, sheet_exists, get_socialcalc, ether_to_sgml, \
build_meta_tag, get_ether_stylesheets, get_file_list, postprocess_sgml
from modules.renderer import render
import modules.redis_cache as cache
# Support IIS site prefix on Windows
if platform.system() == "Windows":
    prefix = "transc\\"
else:
    prefix = ""
# Read configuration
# All paths are derived from this script's own directory so the CGI can run
# from any working directory.
scriptpath = os.path.dirname(os.path.realpath(__file__)) + os.sep
userdir = scriptpath + "users" + os.sep
templatedir = scriptpath + "templates" + os.sep
config = ConfigObj(userdir + 'config.ini')
project = config["project"]
editor_help_link = config["editor_help_link"]
# Captions and API URLs for NLP buttons
xml_nlp_button = config["xml_nlp_button"]
spreadsheet_nlp_button = config["spreadsheet_nlp_button"]
xml_nlp_api = config["xml_nlp_api"]
spreadsheet_nlp_api = config["spreadsheet_nlp_api"]
# Two-factor auth code; set per-request in load_page()
code_2fa = None
def harvest_meta(sgml):
    """
    Get metadata key value pairs from <meta> element in imported SGML file

    :param sgml: TT SGML as string
    :return: dictionary of key value pairs (empty if no/malformed <meta> tag)
    """
    sgml = sgml.replace("\r", "").strip()
    meta = {}
    if not sgml.startswith("<meta "):
        return meta
    tag_match = re.search(r'<meta ([^\n]*)>', sgml)
    if tag_match is None:
        # BUG FIX: an unterminated <meta tag used to crash on .group(1);
        # treat it as "no metadata" instead.
        return meta
    metatag = tag_match.group(1)
    for key, value in re.findall(r'([^ =>]+?)="([^"]+)"', metatag):
        # Unescape the XML entities used to store angle brackets in attributes
        meta[key.strip()] = value.strip().replace("&lt;", "<").replace("&gt;", ">")
    return meta
def serialize_file(text_content, file_name):
    """Write text_content to file_name under the platform-specific prefix."""
    with open(prefix + file_name, 'w') as out_file:
        out_file.write(text_content)  # content is already encoded upstream
def get_user_list():
    """Return usernames from users/*.ini, excluding reserved config files.

    :return: list of ini file basenames (extension stripped) under users/
    """
    # Removed unused `user_list` local; the listing comes from get_file_list.
    scriptpath = os.path.dirname(os.path.realpath(__file__)) + os.sep
    userdir = scriptpath + "users" + os.sep
    return get_file_list(userdir, "ini", forbidden=["admin", "default", "config"], hide_extension=True)
def load_page(user,admin,theform):
    """Handle one editor-page request and return the rendered HTML.

    Dispatches on the CGI form fields: document creation/renaming, metadata
    cloning, NLP service calls, git push, EtherCalc spreadsheet sync, and
    finally renders the "editor" template.

    :param user: username from the login config ("demo" users cannot write)
    :param admin: admin level as a string ("0".."3")
    :param theform: cgi.FieldStorage with the request parameters
    :return: rendered HTML (unicode string)
    """
    global ether_url
    global code_2fa
    # Remember any two-factor auth code submitted with this request
    if theform.getvalue("2fa"):
        code_2fa = theform.getvalue("2fa")
    else:
        code_2fa = ""
    max_id = generic_query("SELECT MAX(id) AS max_id FROM docs","")[0][0]
    if not max_id: # This is for the initial case after init db
        max_id = 0
    # Defaults used until the form or the DB provides real values
    text_content = ""
    repo_name = ""
    corpus = ""
    status = ""
    assignee = ""
    mode = "xml"
    schema = ""
    doc_id = "" # Should only remain so if someone navigated directly to editor.py
    docname = ""
    old_docname, old_corpus, old_repo, old_status, old_assignee, old_mode, old_schema = ["", "", "", "", "", "", ""]
    if int(admin) > 0:
        git_username, git_token, git_2fa = get_git_credentials(user, admin, code_2fa)
    else:
        git_username, git_token, git_2fa = (None, None, None)
    # dict of variables we'll need to render the html
    render_data = {}
    if theform.getvalue('id'):
        doc_id = theform.getvalue('id')
        if int(doc_id) > int(max_id):
            # Creating new doc case, assign some default values
            docname = "new_document"
            repo_name = "account/repo_name"
            status = "editing"
            assignee = "default_user"
            corpus = "default_corpus"
            schema = ""
            text_content = ""
            # If one of the four forms is edited or we're cloning a doc, then we create the doc, otherwise nothing happens (user cannot fill in nothing and create the doc)
            if theform.getvalue('edit_docname') and user != "demo":
                if docname != 'new_document':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_docname(doc_id, docname)
            if theform.getvalue('edit_filename') and user != "demo":
                repo_name = theform.getvalue('edit_filename')
                if repo_name != 'account/repo_name':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_filename(doc_id, repo_name)
            if theform.getvalue('edit_corpusname') and user != "demo":
                corpus = theform.getvalue('edit_corpusname')
                if corpus != 'default_corpus':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_corpus(doc_id, corpus)
            if theform.getvalue('edit_status') and user != "demo":
                status = theform.getvalue('edit_status')
                if status != 'editing':
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_status(doc_id, status)
            if theform.getvalue('edit_assignee') and user != "demo":
                assignee = theform.getvalue('edit_assignee')
                if assignee != "default_user":
                    if doc_id > max_id:
                        create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                        max_id = doc_id
                    else:
                        update_assignee(doc_id, assignee)
            # cloning metadata from an existing doc into a new doc
            if theform.getvalue('source_doc'):
                source_meta = get_doc_meta(theform.getvalue('source_doc'))
                if doc_id > max_id:
                    create_document(doc_id, docname, corpus, status, assignee, repo_name, text_content)
                    max_id = doc_id
                for meta in source_meta:
                    m_key, m_val = meta[2:4]
                    save_meta(int(doc_id), m_key.decode("utf8"), m_val.decode("utf8"))
                cache.invalidate_by_doc(doc_id, "meta")
        else:
            # Get previous values from DB
            old_docname, old_corpus, old_repo, old_status, old_assignee, old_mode, old_schema = get_doc_info(doc_id)
            # Assume new values are same, overwrite with different form values and update DB if new values found
            docname, corpus, repo_name, status, assignee, mode, schema = old_docname, old_corpus, old_repo, old_status, old_assignee, old_mode, old_schema
            docname = old_docname
            # Handle switch to spreadsheet mode if NLP spreadsheet service is called
            if theform.getvalue('nlp_spreadsheet') == "do_nlp_spreadsheet" and mode == "xml" and user != "demo":
                data_to_process = generic_query("SELECT content FROM docs WHERE id=?",(doc_id,))[0][0]
                api_call = spreadsheet_nlp_api
                if api_call != "":
                    nlp_user, nlp_password = get_nlp_credentials()
                    data = {"data":data_to_process, "lb":"line", "format":"sgml_no_parse"}
                    resp = requests.post(api_call, data, auth=HTTPBasicAuth(nlp_user,nlp_password))
                    sgml = resp.text.encode("utf8")
                    postproc = config["nlp_postprocessing"] if "nlp_postprocessing" in config else None
                    sgml = postprocess_sgml(sgml,postproc)
                else:
                    # No NLP service configured: import the raw document as-is
                    sgml = data_to_process.encode("utf8")
                out, err = make_spreadsheet(sgml, ether_url + "_/gd_" + corpus + "_" + docname, "sgml")
                mode = "ether"
            # handle copying metadata
            if theform.getvalue('source_doc'):
                source_meta = get_doc_meta(theform.getvalue('source_doc'))
                existing_meta_keys = [x[2] for x in get_doc_meta(doc_id)]
                # don't overwrite existing keys
                meta_to_write = [x for x in source_meta if x[2] not in existing_meta_keys]
                for meta in meta_to_write:
                    m_key, m_val = meta[2], meta[3]
                    save_meta(int(doc_id), m_key, m_val)
                cache.invalidate_by_doc(doc_id, "meta")
            if theform.getvalue('edit_docname'):
                docname = theform.getvalue('edit_docname')
            elif old_docname != "":
                docname = old_docname
            if theform.getvalue('edit_corpusname'):
                corpus = theform.getvalue('edit_corpusname')
            elif old_corpus != "":
                corpus = old_corpus
    if theform.getvalue('id'):
        if int(doc_id) <= int(max_id):
            # After clicking edit in landing page, editing existing doc case, get the values from the db. pull the content from db to be displayed in the editor window.
            if theform.getvalue('edit_docname'):
                docname = theform.getvalue('edit_docname')
                if docname != old_docname and user != "demo":
                    update_docname(doc_id,docname)
            if theform.getvalue('edit_filename'):
                repo_name=theform.getvalue('edit_filename')
                if repo_name != old_repo and user != "demo":
                    update_filename(doc_id,repo_name)
            if theform.getvalue('edit_corpusname'):
                corpus = theform.getvalue('edit_corpusname')
                if corpus != old_corpus and user != "demo":
                    update_corpus(doc_id,corpus)
            if theform.getvalue('edit_status'):
                status = theform.getvalue('edit_status')
                if status != old_status and user != "demo":
                    update_status(doc_id,status)
            if theform.getvalue('edit_assignee'):
                assignee = theform.getvalue('edit_assignee')
                if assignee != old_assignee and user != "demo":
                    update_assignee(doc_id,assignee)
            if theform.getvalue('edit_mode'):
                mode = theform.getvalue('edit_mode')
                if mode != old_mode and user != "demo":
                    update_mode(doc_id,mode)
            if theform.getvalue('nlp_spreadsheet') == "do_nlp_spreadsheet": # mode has been changed to spreadsheet via NLP
                update_mode(doc_id, "ether")
                mode = "ether"
            # Renaming the doc or corpus means its EtherCalc sheet must be moved too
            if old_docname != docname or old_corpus != corpus:
                old_sheet_name = "gd" + "_" + old_corpus + "_" + old_docname
                if sheet_exists(ether_url, old_sheet_name): # Check if there is an ether sheet to copy
                    old_socialcalc = get_socialcalc(ether_url, old_sheet_name)
                    out, err = make_spreadsheet(old_socialcalc, ether_url + "_/gd_" + corpus + "_" + docname, "socialcalc")
                    if out == "OK":
                        delete_spreadsheet(ether_url,old_sheet_name)
            text_content = generic_query("SELECT content FROM docs WHERE id=?",(doc_id,))[0][0]
    # In the case of reloading after hitting 'save', either create new doc into db, or update db
    # CodeMirror sends the form with its code content in it before 'save' so we just display it again
    if theform.getvalue('code'):
        text_content = theform.getvalue('code')
        text_content = text_content.replace("\r","")
        text_content = re.sub(r'&(?!amp;)',r'&',text_content) # Escape unescaped XML &
        text_content = unicode(text_content.decode("utf8"))
        if user != "demo":
            if int(doc_id)>int(max_id):
                create_document(doc_id, docname,corpus,status,assignee,repo_name,text_content)
            else:
                save_changes(doc_id,text_content)
            cache.invalidate_by_doc(doc_id, "xml")
    git_status=False
    commit_message = ""
    if theform.getvalue('commit_msg'):
        commit_message = theform.getvalue('commit_msg')
    if theform.getvalue('push_git') == "push_git":
        repo_name = generic_query("SELECT filename FROM docs WHERE id=?", (doc_id,))[0][0]
        file_name = generic_query("SELECT name FROM docs WHERE id=?", (doc_id,))[0][0]
        repo_info = repo_name.split('/')
        git_account, git_repo = repo_info[0], repo_info[1]
        if len(repo_info) > 2:
            subdir = '/'.join(repo_info[2:]) + "/"
        else:
            subdir = ""
        # The user will indicate the subdir in the repo_name stored in the db.
        # Therefore, a file may be associated with the target repo subdir zangsir/coptic-xml-tool/uploaded_commits,
        # and that is fine, but we will need to make this uploaded_commits subdir first to create our file.
        if not os.path.isdir(prefix + subdir) and subdir != "":
            dirs = subdir.split(os.sep)[:-1]
            path_so_far = ""
            for dir in dirs:
                if not os.path.isdir(prefix + path_so_far + dir + os.sep):
                    os.mkdir(prefix + path_so_far + dir + os.sep, 0755)
                path_so_far += dir + os.sep
        if mode == "xml":
            text_content = generic_query("SELECT content FROM docs WHERE id=?", (doc_id,))[0][0]
            serializable_content = build_meta_tag(doc_id) + text_content.strip() + "\n</meta>\n"
            serializable_content = serializable_content.encode('utf8')
            file_name = file_name.replace(" ","_") + ".xml"
        else: # (mode == "ether")
            text_content = ether_to_sgml(get_socialcalc(ether_url, "gd" + "_" + corpus + "_" + docname),doc_id)
            serializable_content = text_content
            file_name = file_name.replace(" ","_") + "_ether.sgml"
        saved_file = subdir + file_name
        serialize_file(serializable_content, saved_file)
        git_status = push_update_to_git(git_username, git_token, saved_file, git_account, git_repo, commit_message)
        # File system cleanup
        if subdir == "":
            # Delete a file
            os.remove(prefix+file_name)
        else:
            # Delete a subdirectory
            shutil.rmtree(prefix+subdir)
    if theform.getvalue('nlp_xml') == "do_nlp_xml" and mode == "xml":
        api_call = xml_nlp_api
        if api_call != "":
            nlp_user, nlp_password = get_nlp_credentials()
            data = {"data":text_content, "format":"pipes"}
            resp = requests.post(api_call, data, auth=HTTPBasicAuth(nlp_user,nlp_password))
            text_content=resp.text
    # Editing options
    # Docname
    # Filename
    status_list = open(prefix+"status.tab").read().replace("\r","").split("\n")
    render_data['status_options'] = [{'text': x, 'selected': x == status} for x in status_list]
    render_data['assignee_options'] = [{'text': x, 'selected': x == assignee} for x in get_user_list()]
    render_data['mode_options'] = [{'text': x, 'selected': x == mode} for x in ["xml", "ether"]]
    render_data['nlp_service'] = {'xml_button_html': xml_nlp_button.decode("utf8"),
                                  'spreadsheet_button_html': spreadsheet_nlp_button.decode("utf8"),
                                  'disabled': user == "demo" or mode == "ether"}
    render_data['git_2fa'] = git_2fa == "true"
    if git_status:
        render_data['git_commit_response'] = git_status.replace('<','').replace('>','')
    # prepare embedded editor html
    if mode == "ether":
        render_data['ether_mode'] = True
        ether_url += "gd_" + corpus + "_" + docname
        render_data['ether_url'] = ether_url
        render_data['ether_stylesheets'] = get_ether_stylesheets()
        if "file" in theform and user != "demo":
            fileitem = theform["file"]
            if len(fileitem.filename) > 0:
                # strip leading path from file name to avoid directory traversal attacks
                fn = os.path.basename(fileitem.filename)
                if fn.endswith(".xls") or fn.endswith(".xlsx"):
                    make_spreadsheet(fileitem.file.read(),"https://etheruser:etherpass@corpling.uis.georgetown.edu/ethercalc/_/gd_" + corpus + "_" + docname,"excel")
                else:
                    # Uploaded SGML: import content and harvest its <meta> attributes
                    sgml = fileitem.file.read()
                    meta_key_val = harvest_meta(sgml)
                    make_spreadsheet(sgml,"https://etheruser:etherpass@corpling.uis.georgetown.edu/ethercalc/_/gd_" + corpus + "_" + docname)
                    for (key, value) in iteritems(meta_key_val):
                        key = key.replace("@","_")
                        save_meta(int(doc_id),key.decode("utf8"),value.decode("utf8"))
                    cache.invalidate_by_doc(doc_id, "meta")
    else:
        render_data['ether_mode'] = False
    # stop here if no doc selected
    if doc_id:
        render_data['doc_is_selected'] = len(doc_id) != 0
    else:
        return render("editor", render_data)
    render_data['id'] = doc_id
    render_data['mode'] = mode
    render_data['schema'] = schema
    render_data['docname'] = docname
    render_data['corpusname'] = corpus
    render_data['text_content'] = text_content
    render_data['repo'] = repo_name
    render_data["admin_gt_zero"] = int(admin) > 0
    render_data["admin_eq_three"] = admin == "3"
    # handle clone meta button, and allow github pushing
    if int(admin) > 0:
        doc_list = generic_query("SELECT id,corpus,name,status,assignee_username,mode FROM docs ORDER BY corpus, name COLLATE NOCASE",())
        render_data["docs"] = []
        for doc in doc_list:
            doc_vars = {}
            doc_vars["id"] = str(doc[0])
            doc_vars["corpus"] = doc[1]
            doc_vars["name"] = doc[2]
            render_data['docs'].append(doc_vars)
    render_data["can_save"] = not (int(admin) < 3)
    render_data["editor_help_link_html"] = editor_help_link
    render_data["first_load"] = len(theform.keys()) == 1
    return render("editor", render_data)
def open_main_server():
    """CGI entry point: authenticate the user and render the editor page.

    Prints the HTTP header and page body to stdout; on failure, prints an
    error page with the traceback instead of crashing the CGI process.
    """
    thisscript = os.environ.get('SCRIPT_NAME', '')
    action = None
    theform = cgi.FieldStorage()
    #print(theform)
    scriptpath = os.path.dirname(os.path.realpath(__file__)) + os.sep
    userdir = scriptpath + "users" + os.sep
    action, userconfig = login(theform, userdir, thisscript, action)
    user = userconfig["username"]
    admin = userconfig["admin"]
    print("Content-type:text/html\n\n")
    try:
        print(load_page(user, admin, theform).encode("utf8"))
    except Exception:
        print("""<html><body><h1>Loading Error</h1>
<p>For some reason, this page failed to load.</p>
<p>Please send this to your system administrator:</p>
<pre>""")
        # BUG FIX: print_exc's first positional parameter is `limit`, not the
        # exception; passing `e` broke traceback printing. The current
        # exception is taken from the interpreter state automatically.
        traceback.print_exc(file=sys.stdout)
        print("""</pre></body></html>""")
# Run the CGI request handler when invoked directly by the web server.
if __name__ == "__main__":
    open_main_server()
|
|
#-*- coding:utf-8 -*-
import logging
logging.basicConfig(level=logging.ERROR)
import asyncio
from pyblog.taskqueue import QueuePayloadJsonEncapsulator
from pyblog.config import Config
try:
import aiomysql
except ImportError:
logging.error("can't import 'aiomysql' module")
exit(-1)
try:
import aioredis
except ImportError:
logging.error("can't import 'aioredis' module")
exit(-1)
class ConfigError(Exception):
    """Raised when a required configuration item is missing."""
class AsyncRedisConnection(object):
    """Thin wrapper around a single lazily created aioredis connection.

    NOTE(review): uses legacy @asyncio.coroutine / yield-from coroutines,
    which were removed in Python 3.11 -- migrating to async def is advised.
    """
    def __init__(self, host, port, loop=None):
        super(AsyncRedisConnection, self).__init__()
        assert isinstance(host, str)
        assert isinstance(port, int)
        self._host = host          # redis server host
        self._port = port          # redis server port
        self._loop = loop          # optional asyncio event loop
        self._connection = None    # created on first get_connection() call
    @asyncio.coroutine
    def get_connection(self, loop=None):
        """Create the redis connection (caching it on self) and return it.

        A loop passed here overrides the one given to the constructor.
        """
        if loop:
            self._loop = loop
        if self._loop:
            self._connection = yield from aioredis.create_redis((self._host, self._port), loop=self._loop)
        else:
            self._connection = yield from aioredis.create_redis((self._host, self._port))
        return self._connection
    @asyncio.coroutine
    def close(self):
        """Close the connection, if any, and wait until it is fully closed."""
        if self._connection:
            self._connection.close()
            yield from self._connection.wait_closed()
class AsyncMysqlConnection(object):
    """aiomysql connection helper that creates the target database on demand.

    NOTE(review): uses legacy @asyncio.coroutine / yield-from coroutines,
    which were removed in Python 3.11 -- migrating to async def is advised.
    """
    # cached results of 'show databases' / 'show tables'
    _db_list = tuple()
    _table_list = tuple()
    def __init__(self, host, port, user, password, db):
        assert isinstance(host, str)
        assert isinstance(port, int)
        assert isinstance(user, str)
        assert isinstance(password, str)
        assert isinstance(db, str)
        self._host = host
        self._port = port
        self._user = user
        self._password = password
        self._db = db
    @asyncio.coroutine
    def get_connection(self, loop, db=None):
        """Connect, create the configured database if absent, select it, return the connection."""
        self._conn = yield from aiomysql.connect(host=self._host,
                                                 port=self._port,
                                                 user=self._user,
                                                 password=self._password,
                                                 loop=loop)
        yield from self._get_db_list()
        if not self._check_db(self._db):
            yield from self._create_db(self._db)
        yield from self._conn.select_db(self._db)
        return self._conn
    def _check_db(self, db_name):
        # True when db_name appeared in the cached 'show databases' result
        if db_name in self._db_list:
            return True
        return False
    @asyncio.coroutine
    def _get_db_list(self):
        """Refresh and return the cached tuple of database names."""
        cursor = yield from self._conn.cursor()
        yield from cursor.execute('show databases')
        dbs = yield from cursor.fetchall()
        yield from cursor.close()
        db_list = list()
        for db in dbs:
            db_list.append(db[0])
        self._db_list = tuple(db_list)
        return self._db_list
    @asyncio.coroutine
    def _create_db(self, db_name):
        """Create database db_name with utf8 charset, rolling back on failure.

        NOTE(review): db_name is interpolated directly into the SQL string --
        only safe for trusted, internally generated names, never user input.
        NOTE(review): the commit in 'finally' also runs after a rollback;
        presumably harmless, but verify that this is intended.
        """
        yield from self._conn.begin()
        cursor = yield from self._conn.cursor()
        try:
            yield from cursor.execute("create database %s character set utf8" % db_name)
        except Exception:
            yield from self._conn.rollback()
        finally:
            yield from self._conn.commit()
            yield from cursor.close()
    @asyncio.coroutine
    def close(self):
        """Close the underlying connection, if one was created."""
        if self._conn:
            self._conn.close()
class AsyncQueue(object):
    """Abstract queue API; ``_<name>`` attribute reads are resolved against
    the config dict via __getattr__."""

    def __init__(self, config):
        self._config = config

    @asyncio.coroutine
    def enqueue(self, queue_name, payload):
        pass

    @asyncio.coroutine
    def dequeue(self, queue_name):
        pass

    def __getattr__(self, key):
        # '_host' -> 'host': strip everything up to the first underscore once.
        item = key.split('_', 1)[1]
        if item in self._config:
            return self._config.get(item)
        raise ConfigError("no '%s' config item" % item)
class AsyncRedisQueue(AsyncQueue):
    """Queue backed by a redis list (lpush to enqueue, rpop to dequeue)."""

    def __init__(self, config, loop=None, connection=AsyncRedisConnection):
        assert isinstance(config, dict)
        super(AsyncRedisQueue, self).__init__(config)
        self._connection_instance = connection(self._host, self._port, loop)
        self._connection = None

    @asyncio.coroutine
    def enqueue(self, queue_name, payload):
        """Push *payload* onto *queue_name* and return it."""
        if not self._connection:
            self._connection = yield from self._connection_instance.get_connection()
        yield from self._connection.lpush(queue_name, payload)
        return payload

    @asyncio.coroutine
    def dequeue(self, queue_name):
        """Pop the oldest payload from *queue_name*; [] when empty."""
        if not self._connection:
            self._connection = yield from self._connection_instance.get_connection()
        payload = yield from self._connection.rpop(queue_name)
        if payload and isinstance(payload, bytes):
            # BUGFIX: redis returns bytes and bytes has no .encode() -- the
            # old code raised AttributeError here. Decode to text instead.
            payload = payload.decode("utf-8")
        elif not payload:
            payload = []
        return payload

    @asyncio.coroutine
    def close_queue(self):
        """Close the underlying redis connection."""
        if self._connection_instance:
            yield from self._connection_instance.close()
class AsyncMysqlQueue(AsyncQueue):
    """Queue emulated on MySQL: one auto-created table per queue name."""
    _queue_list = tuple()   # cached result of `show tables`

    def __init__(self, config, loop, connection=AsyncMysqlConnection):
        assert isinstance(config, dict)
        super(AsyncMysqlQueue, self).__init__(config)
        self._loop = loop
        self._queue_conn = None
        self._connection_instance = connection(
            self._host, self._port, self._user, self._password, self._db)

    def _check_queue(self, queue_name):
        """Return True if a table for *queue_name* already exists."""
        return queue_name in self._queue_list

    @asyncio.coroutine
    def _get_queue_list(self):
        """Refresh and return the cached tuple of table (queue) names."""
        cursor = yield from self._queue_conn.cursor()
        yield from cursor.execute('show tables')
        tables = yield from cursor.fetchall()
        # BUGFIX: close the cursor (was leaked; _get_db_list closes its own).
        yield from cursor.close()
        self._queue_list = tuple(row[0] for row in tables)
        return self._queue_list

    @asyncio.coroutine
    def _create_queue(self, queue_name):
        """Create the backing table (auto-increment id + longtext payload).

        NOTE(review): the table name cannot be parameterized; *queue_name*
        must come from trusted code.
        """
        cursor = yield from self._queue_conn.cursor()
        yield from self._queue_conn.begin()
        try:
            yield from cursor.execute('create table `%s`(`id` int unsigned not null auto_increment primary key,`payload` longtext )charset utf8' % (queue_name))
        except Exception:
            yield from self._queue_conn.rollback()
        finally:
            yield from self._queue_conn.commit()
            yield from cursor.close()

    @asyncio.coroutine
    def _check_connection(self):
        """Ensure self._queue_conn is open and the queue list is fresh."""
        if not self._queue_conn:
            self._queue_conn = yield from self._connection_instance.get_connection(self._loop)
        yield from self._queue_conn.select_db(self._db)
        yield from self._get_queue_list()
        return self._queue_conn

    @asyncio.coroutine
    def enqueue(self, queue_name, payload):
        """Insert *payload* into the queue table, creating it if needed."""
        assert isinstance(queue_name, str)
        yield from self._check_connection()
        if not self._check_queue(queue_name):
            yield from self._create_queue(queue_name)
            yield from self._get_queue_list()
        cursor = yield from self._queue_conn.cursor()
        yield from self._queue_conn.begin()
        try:
            # BUGFIX: payload is now passed as a query parameter instead of
            # being %-interpolated into the SQL, so quotes in the payload no
            # longer break the statement (and cannot inject SQL).
            yield from cursor.execute(
                "insert into `%s` (`payload`) values(%%s)" % queue_name,
                (payload,))
        except Exception:
            yield from self._queue_conn.rollback()
        finally:
            yield from self._queue_conn.commit()
            yield from cursor.close()
        return payload

    @asyncio.coroutine
    def dequeue(self, queue_name):
        """Pop the oldest payload from *queue_name*; [] when empty.

        BUGFIX: removed ``assert isinstance(payload, (str, bytes))`` which
        referenced an undefined name and raised NameError on every call.
        """
        assert isinstance(queue_name, str)
        yield from self._check_connection()
        if not self._check_queue(queue_name):
            yield from self._create_queue(queue_name)
            yield from self._get_queue_list()
        cursor = yield from self._queue_conn.cursor()
        yield from cursor.execute('select `id`,`payload` from `%s` order by `id` asc limit 1' % (queue_name))
        ret = yield from cursor.fetchone()
        if ret and len(ret) >= 1:
            try:
                # BUGFIX: id passed as a parameter rather than interpolated.
                yield from cursor.execute(
                    "delete from `%s` where `id`=%%s" % queue_name, (ret[0],))
            except Exception:
                yield from self._queue_conn.rollback()
            finally:
                yield from self._queue_conn.commit()
                yield from cursor.close()
            return ret[1]
        yield from cursor.close()
        return []

    @asyncio.coroutine
    def close_queue(self):
        """Close the underlying MySQL connection."""
        if self._connection_instance:
            yield from self._connection_instance.close()
class AsyncQueueOperator(object):
    """Base for reader/writer facades; maps driver names to queue classes.

    Subclasses are expected to set ``self._loop`` before using the factories.
    """
    _queue_driver_class = {'mysql': AsyncMysqlQueue, 'redis': AsyncRedisQueue}

    def __init__(self):
        pass

    def _get_mysql_queue_driver(self, config):
        """Build an AsyncMysqlQueue bound to self._loop."""
        driver_cls = self._queue_driver_class.get('mysql')
        return driver_cls(config, self._loop)

    def _get_redis_queue_driver(self, config):
        """Build an AsyncRedisQueue bound to self._loop."""
        driver_cls = self._queue_driver_class.get("redis")
        return driver_cls(config, self._loop)
class AsyncQueueReader(AsyncQueueOperator):
    """Facade that dequeues payloads via the configured queue driver."""

    def __init__(self, config, loop, driver_name='mysql'):
        assert isinstance(config, dict)
        assert isinstance(driver_name, str)
        self._loop = loop
        factory = getattr(self, "_get_%s_queue_driver" % driver_name)
        self._reader_instance = factory(config)

    @asyncio.coroutine
    def read_from_queue(self, queue_name):
        """Pop and return one payload from *queue_name*."""
        payload = yield from self._reader_instance.dequeue(queue_name)
        return payload

    @asyncio.coroutine
    def close_reader(self):
        yield from self._reader_instance.close_queue()
class AsyncQueueWriter(AsyncQueueOperator):
    """Facade that enqueues payloads via the configured queue driver."""

    def __init__(self, config, loop, driver_name='mysql'):
        assert isinstance(config, dict)
        assert isinstance(driver_name, str)
        self._loop = loop
        factory = getattr(self, "_get_%s_queue_driver" % driver_name)
        self._writer_instance = factory(config)

    @asyncio.coroutine
    def write_to_queue(self, queue_name, payload):
        """Push *payload* onto *queue_name* and return it."""
        yield from self._writer_instance.enqueue(queue_name, payload)
        return payload

    @asyncio.coroutine
    def close_writer(self):
        yield from self._writer_instance.close_queue()
class AsyncTask(object):
    """One unit of queue work: a typed payload plus retry count that is
    encapsulated and written to the configured queue."""

    def __init__(self, task_type, tries, content, loop, config=None, driver_name=None, encapsulator=QueuePayloadJsonEncapsulator, writer=AsyncQueueWriter):
        assert isinstance(content, (str, dict))
        assert isinstance(task_type, str)
        assert isinstance(tries, int)
        self._content = content
        self._task_type = task_type
        self._tries = tries
        self._encapsulator = encapsulator(
            self._task_type, self._tries, self._content)
        # BUGFIX: honour the injected `writer` class; the old code ignored
        # the parameter and always instantiated AsyncQueueWriter.
        self._writer = writer(
            config or Config.queue.all, loop, driver_name or Config.queue.driver_name)

    @asyncio.coroutine
    def start(self, queue_name=None):
        """Write the encapsulated payload to *queue_name* (default: the task
        type) and close the writer; return the written payload."""
        if not queue_name:
            queue_name = self._task_type
        data = yield from self._writer.write_to_queue(queue_name, self._encapsulator.encapsulate())
        yield from self._writer.close_writer()
        return data

    def refresh_task(self, task_type, tries, content):
        """Replace type/tries/content.

        NOTE(review): the encapsulator is NOT rebuilt here, so a later
        start() still sends the original payload -- confirm intended.
        """
        assert isinstance(task_type, str)
        assert isinstance(tries, int)
        assert isinstance(content, (str, dict))
        self._tries = tries
        self._task_type = task_type
        self._content = content
if __name__ == '__main__':
    # NOTE: the entire body below is a raw string literal, so this manual
    # smoke test is effectively disabled and never executes.
    r'''
    @asyncio.coroutine
    def go(loop,config=None):
        #asyncqueue=AsyncMysqlQueue(loop,config)
        #data=yield from asyncqueue.enqueue("msg",'shabi')
        #print(data)
        #asyncqueuewriter=AsyncQueueWriter(loop,config)
        #data=yield from asyncqueuewriter.write_to_queue('mail','send mail to you')
        #asyncreader=AsyncQueueReader(loop,config)
        #data=yield from asyncreader.read_from_queue("msg")
        #print(data)
        asynctask=AsyncTask('mail',3,'send mail to 18281573692@163.com',loop,config,'mysql')
        yield from asynctask.start()
    loop=asyncio.get_event_loop()
    config={
        'host':'127.0.0.1',
        'port':3306,
        'user':'root',
        'password':'526114',
        'db':'queue'
    }
    loop.run_until_complete(go(loop,config))
    loop.close()
    '''
|
|
from pprint import pprint as pp
import pytest
from scout.exceptions import IntegrityError
def test_add_panel(adapter, testpanel_obj):
    """Adding a gene panel to an empty adapter stores it."""
    ## GIVEN an adapter without panels
    assert adapter.panel_collection.find_one() is None
    ## WHEN inserting a panel
    adapter.add_gene_panel(testpanel_obj)
    ## THEN assert that the panel was loaded
    assert adapter.panel_collection.find_one()
def test_add_same_panel_twice(adapter, testpanel_obj):
    """Inserting the same panel twice raises IntegrityError."""
    panel_obj = testpanel_obj
    ## GIVEN a adapter without gene panels
    assert adapter.panel_collection.find_one() is None
    ## WHEN inserting a panel twice
    adapter.add_gene_panel(panel_obj)
    ## THEN assert that IntegrityError is raised
    with pytest.raises(IntegrityError):
        adapter.add_gene_panel(panel_obj)
def test_get_panel_by_version(adapter, testpanel_obj):
    """A panel can be fetched by name together with an explicit version."""
    panel_obj = testpanel_obj
    adapter.panel_collection.insert_one(panel_obj)
    ## GIVEN a adapter with one gene panel
    assert adapter.panel_collection.find_one()
    ## WHEN getting a panel
    res = adapter.gene_panel(panel_id=panel_obj["panel_name"], version=panel_obj["version"])
    ## THEN assert that the panel was loaded
    assert res["panel_name"] == panel_obj["panel_name"]
def test_get_panel_by_name(adapter, panel_info, testpanel_obj):
    """A panel can be fetched by name alone, without specifying a version."""
    adapter.panel_collection.insert_one(testpanel_obj)
    ## GIVEN a adapter with one gene panel
    assert adapter.panel_collection.find_one()
    ## WHEN getting a panel without version
    res = adapter.gene_panel(panel_id=panel_info["panel_name"])
    ## THEN assert that the panel was loaded
    assert res["panel_name"] == panel_info["panel_name"]
def test_get_non_existing_panel(adapter, testpanel_obj):
    """Fetching an unknown panel name returns None."""
    panel_obj = testpanel_obj
    adapter.panel_collection.insert_one(panel_obj)
    ## GIVEN a adapter with one gene panel
    assert adapter.panel_collection.find_one()
    ## WHEN getting a panel with a name that does not exist
    res = adapter.gene_panel(panel_id="non existing")
    ## THEN assert that no panel is returned
    assert res is None
def test_get_panel_multiple_versions(adapter, testpanel_obj):
    """Without an explicit version the newest panel version is returned."""
    ## GIVEN an adapter with multiple versions of same gene panel
    testpanel_obj["_id"] = 1
    adapter.panel_collection.insert_one(testpanel_obj)
    testpanel_obj["_id"] = 2
    testpanel_obj["version"] = 2.0
    adapter.panel_collection.insert_one(testpanel_obj)
    res = adapter.gene_panels()
    assert sum(1 for i in res) == 2
    ## WHEN getting a panel
    res = adapter.gene_panel(panel_id=testpanel_obj["panel_name"])
    ## THEN assert that the last version is fetched
    assert res["version"] == 2.0
def test_reset_pending(adapter, testpanel_obj, gene_obj):
    """Test the function that clears the pending changes from a gene panel"""
    # GIVEN a gene panel
    adapter.panel_collection.insert_one(testpanel_obj)
    # and a gene
    adapter.hgnc_collection.insert_one(gene_obj)
    # fetch the stored gene document back for use as the pending target
    hgnc_obj = adapter.hgnc_collection.find_one()
    ## WHEN adding a pending action of this gene to a panel
    res = adapter.add_pending(panel_obj=testpanel_obj, hgnc_gene=hgnc_obj, action="add")
    assert len(res["pending"]) == 1
    ## IF reset pending is used to clear pending actions
    ## Then panel should not have any more pending actions
    updated_panel = adapter.reset_pending(res)
    assert updated_panel.get("pending") is None
def test_add_pending(adapter, testpanel_obj, gene_obj):
    """Adding a valid pending action stores it on the panel."""
    adapter.panel_collection.insert_one(testpanel_obj)
    adapter.hgnc_collection.insert_one(gene_obj)
    ## GIVEN a adapter with one gene panel and a gene
    panel_obj = adapter.panel_collection.find_one()
    hgnc_obj = adapter.hgnc_collection.find_one()
    assert panel_obj
    assert hgnc_obj
    ## WHEN adding a pending action
    res = adapter.add_pending(panel_obj=panel_obj, hgnc_gene=hgnc_obj, action="add")
    ## THEN assert that the panel now has exactly one pending action
    assert len(res["pending"]) == 1
def test_add_pending_wrong_action(adapter, testpanel_obj, gene_obj):
    """add_pending rejects unknown action strings with ValueError."""
    adapter.panel_collection.insert_one(testpanel_obj)
    adapter.hgnc_collection.insert_one(gene_obj)
    ## GIVEN a adapter with one gene panel and a gene
    panel_obj = adapter.panel_collection.find_one()
    hgnc_obj = adapter.hgnc_collection.find_one()
    assert panel_obj
    assert hgnc_obj
    ## WHEN adding a pending action with invalid action
    with pytest.raises(ValueError):
        ## THEN assert that an error is raised
        res = adapter.add_pending(panel_obj=panel_obj, hgnc_gene=hgnc_obj, action="hello")
def test_update_panel_panel_name(adapter, testpanel_obj):
    """update_panel persists a changed panel name."""
    adapter.panel_collection.insert_one(testpanel_obj)
    ## GIVEN a adapter with a gene panel
    panel_obj = adapter.panel_collection.find_one()
    assert panel_obj
    old_name = panel_obj["panel_name"]
    new_name = "new name"
    ## WHEN updating the panel name
    panel_obj["panel_name"] = new_name
    res = adapter.update_panel(panel_obj)
    ## THEN assert that the returned panel carries the new name
    assert res["panel_name"] == new_name
def test_update_panel_panel_description(adapter, testpanel_obj):
    """update_panel persists a changed panel description."""
    adapter.panel_collection.insert_one(testpanel_obj)
    ## GIVEN a adapter with a gene panel
    panel_obj = adapter.panel_collection.find_one()
    assert panel_obj
    assert panel_obj["description"]
    # Update its description
    panel_obj["description"] = "Test description"
    res = adapter.update_panel(panel_obj)
    ## THEN assert that description was updated
    assert res["description"] == "Test description"
def test_apply_pending_delete_gene(adapter, testpanel_obj):
    """Applying a 'delete' action removes the gene and bumps the version."""
    adapter.panel_collection.insert_one(testpanel_obj)
    ## GIVEN a adapter with a gene panel
    panel_obj = adapter.panel_collection.find_one()
    assert panel_obj
    gene = panel_obj["genes"][0]
    hgnc_id = gene["hgnc_id"]
    hgnc_symbol = gene["symbol"]
    action = {"hgnc_id": hgnc_id, "action": "delete", "symbol": hgnc_symbol, "info": {}}
    ## WHEN adding a action to the panel
    panel_obj["pending"] = [action]
    old_version = panel_obj["version"]
    updated_panel_id = adapter.apply_pending(panel_obj, panel_obj["version"] + 1)
    updated_panel = adapter.panel_collection.find_one({"_id": updated_panel_id})
    # assert that the updated panel has a newer version
    assert updated_panel["version"] != old_version
    ## THEN assert that the new panel does not have the deleted gene
    for gene in updated_panel["genes"]:
        assert gene["hgnc_id"] != hgnc_id
def test_apply_pending_delete_two_genes(adapter, testpanel_obj):
    """Two 'delete' actions remove both genes from the updated panel."""
    adapter.panel_collection.insert_one(testpanel_obj)
    ## GIVEN a adapter with a gene panel
    panel_obj = adapter.panel_collection.find_one()
    assert panel_obj
    gene = panel_obj["genes"][0]
    gene2 = panel_obj["genes"][1]
    hgnc_ids = [gene["hgnc_id"], gene2["hgnc_id"]]
    action = {
        "hgnc_id": gene["hgnc_id"],
        "action": "delete",
        "symbol": gene["symbol"],
        "info": {},
    }
    action2 = {
        "hgnc_id": gene2["hgnc_id"],
        "action": "delete",
        "symbol": gene2["symbol"],
        "info": {},
    }
    panel_obj["pending"] = [action, action2]
    updated_panel_id = adapter.apply_pending(panel_obj, panel_obj["version"])
    updated_panel = adapter.panel_collection.find_one({"_id": updated_panel_id})
    ## THEN neither deleted gene remains on the panel
    for gene in updated_panel["genes"]:
        assert gene["hgnc_id"] not in hgnc_ids
def test_apply_pending_add_gene(adapter, testpanel_obj):
    """An 'add' action restores a gene to an emptied panel, bumping version."""
    adapter.panel_collection.insert_one(testpanel_obj)
    ## GIVEN a adapter with a gene panel
    panel_obj = adapter.panel_collection.find_one()
    assert panel_obj
    gene = panel_obj["genes"][0]
    hgnc_id = gene["hgnc_id"]
    hgnc_symbol = gene["symbol"]
    # empty the panel so the add action has something to restore
    panel_obj["genes"] = []
    adapter.update_panel(panel_obj)
    panel_obj = adapter.panel_collection.find_one()
    assert len(panel_obj["genes"]) == 0
    action = {"hgnc_id": hgnc_id, "action": "add", "symbol": hgnc_symbol, "info": {}}
    panel_obj["pending"] = [action]
    # update panel version to panel_version +1
    updated_panel_id = adapter.apply_pending(panel_obj, panel_obj["version"] + 1)
    updated_panel = adapter.panel_collection.find_one({"_id": updated_panel_id})
    # assert that panel version was updated
    assert updated_panel["version"] == panel_obj["version"] + 1
    assert len(updated_panel["genes"]) == 1
def test_apply_pending_add_two_genes(adapter, testpanel_obj):
    """Two 'add' actions restore two distinct genes without bumping version."""
    adapter.panel_collection.insert_one(testpanel_obj)
    ## GIVEN a adapter with a gene panel
    panel_obj = adapter.panel_collection.find_one()
    assert panel_obj
    gene = panel_obj["genes"][0]
    gene2 = panel_obj["genes"][1]
    # BUGFIX: the second entry previously reused `gene`, so the test silently
    # exercised the same gene twice instead of two distinct genes.
    hgnc_ids = [gene["hgnc_id"], gene2["hgnc_id"]]
    hgnc_symbols = [gene["symbol"], gene2["symbol"]]
    panel_obj["genes"] = []
    adapter.update_panel(panel_obj)
    panel_obj = adapter.panel_collection.find_one()
    assert len(panel_obj["genes"]) == 0
    action1 = {
        "hgnc_id": hgnc_ids[0],
        "action": "add",
        "symbol": hgnc_symbols[0],
        "info": {},
    }
    action2 = {
        "hgnc_id": hgnc_ids[1],
        "action": "add",
        "symbol": hgnc_symbols[1],
        "info": {},
    }
    panel_obj["pending"] = [action1, action2]
    # update panel without changing panel version
    updated_panel_id = adapter.apply_pending(panel_obj, panel_obj["version"])
    updated_panel = adapter.panel_collection.find_one({"_id": updated_panel_id})
    # assert that panel version was NOT updated
    assert updated_panel["version"] == panel_obj["version"]
    assert len(updated_panel["genes"]) == 2
    for gene in updated_panel["genes"]:
        assert gene["hgnc_id"] in hgnc_ids
def test_apply_pending_edit_gene(adapter, testpanel_obj):
    """An 'edit' action updates a gene's (custom) inheritance models."""
    ## GIVEN an adapter with a gene panel
    adapter.panel_collection.insert_one(testpanel_obj)
    panel_obj = adapter.panel_collection.find_one()
    # Given a gene of this panel
    gene = panel_obj["genes"][0]
    hgnc_id = gene["hgnc_id"]
    hgnc_symbol = gene["symbol"]
    # without inheritance models
    assert gene.get("inheritance_models") is None
    assert gene.get("custom_inheritance_models") is None
    # When applying the the pending update to customize inheritance models
    action = {
        "hgnc_id": hgnc_id,
        "action": "edit",
        "symbol": hgnc_symbol,
        "info": {
            "inheritance_models": ["AR"],
            "custom_inheritance_models": ["model_1", "model_2"],
        },
    }
    panel_obj["pending"] = [action]
    updated_panel_id = adapter.apply_pending(panel_obj, panel_obj["version"] + 1)
    # Then the updated panel
    updated_panel = adapter.panel_collection.find_one({"_id": updated_panel_id})
    # should show the right inheritance models
    assert updated_panel["genes"][0]["inheritance_models"] == ["AR"]
    # and the right custom inheritance models
    assert updated_panel["genes"][0]["custom_inheritance_models"] == [
        "model_1",
        "model_2",
    ]
def test_clinical_symbols(case_obj, real_panel_database):
    """test function that returns a set of clinical genes symbols from test case panels"""
    # GIVEN an adapter with genes and a gene panel
    adapter = real_panel_database
    test_panel = adapter.panel_collection.find_one()
    # GIVEN a case analysed with that panel
    case_obj["panels"] = [{"panel_id": test_panel["_id"]}]
    # THEN the clinical_symbols function should return a valid set of clinical genes symbols for the case panel
    clinical_symbols = adapter.clinical_symbols(case_obj)
    assert len(clinical_symbols) > 0
def test_clinical_hgnc_ids(case_obj, real_panel_database):
    """test function that returns a set of clinical genes HGNC IDs from test case panels"""
    # GIVEN an adapter with genes and a gene panel
    adapter = real_panel_database
    test_panel = adapter.panel_collection.find_one()
    # GIVEN a case analysed with that panel
    case_obj["panels"] = [{"panel_id": test_panel["_id"]}]
    # THEN the clinical_hgnc_ids function should return a valid set of hgnc IDs for the case panel
    # BUGFIX: this test previously called adapter.clinical_symbols(), so the
    # clinical_hgnc_ids method was never actually exercised.
    clinical_hgnc_ids = adapter.clinical_hgnc_ids(case_obj)
    assert len(clinical_hgnc_ids) > 0
|
|
## Copyright 2013 Matthew A. Robinson
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import pygame
import logging
import traceback
import sys
from pygame.locals import *
log = logging.getLogger("R.Console")
class ConsoleSTDOUT(object):
    """
    File-like stand-in for sys.stdout: forwards output from print() and
    similar calls to the GameConsole as INFO-level stream lines.
    """

    def __init__(self, gameConsole):
        self.gameConsole = gameConsole

    def write(self, data):
        log.debug("!@ STDOUT received " + data + " ]]")
        # Bare newlines from print() are dropped; everything else is tagged
        # as INFO coming from the STDOUT source.
        if data != "\n":
            self.gameConsole.write("INFO ; STDOUT ; " + data + "\n")
        self.gameConsole.flush()
def splitLine(string, overflow=70):
    """
    Break *string* into consecutive chunks of at most *overflow* characters
    (the last chunk may be shorter); returns the chunks as a list.
    """
    return [string[start:start + overflow]
            for start in range(0, len(string), overflow)]
##def lightenColor(color, value):
## """
## Brighten every color element value by 'value'
## """
## r, g, b, a = color
## r += value
## g += value
## b += value
## a = 255
## if r > 255:
## r = 255
## if g > 255:
## g = 255
## if b > 255:
## b = 255
## return (r,g,b,a)
class GameConsole(object):
    """Quake-style in-game console.

    Hijacks sys.stdout and installs itself as a logging handler so that both
    print() output and log records are rendered onto a translucent pygame
    surface. Typed entries are executed as Python via execute().

    NOTE(review): relies on a class attribute ``blacklistedSources`` that is
    never defined in this file -- presumably created when resetConfiguration()
    exec()s config/console.cfg; confirm before refactoring.
    """
    MESSAGE_HEIGHT = 15 # spacing in pixels to give each message, including the message itself
    CONSOLE_PADDING = 50 # space in pixels from the bottom of the screen where messages start
    ENTRY_PADDING = 15 # space in pixels from the bottom of the screen where the entry box starts
    PADDING_LEFT = 15 # padding in pixels from the left of the screen to text.
    DARKEN_WIDTH = .80 # percent of screen width to darken from console background
    TEXT_OVERFLOW = 80 # characters at 1280 px width
    LOGSOURCE_SPACING = 25 # characters to space after logging source values
    MESSAGE_BUFFER_LENGTH = 100 # messages to render before deleting
    def __init__(self, game, level=logging.INFO):
        """Wire the console into stdout and the "R" logger tree.

        :param game: owning game object; must expose width, height, quitting,
            clock and app (used by sprite()).
        :param level: minimum logging level rendered to the console.
        """
        # From here on, print() goes through ConsoleSTDOUT into this console.
        sys.stdout = ConsoleSTDOUT(self)
        rootLogger = logging.getLogger("R")
        self.fps = 0
        self._fpsUpdateWait = 0
        self._fpsUpdateDelay = 100
        self.scrollOffset = 1
        # Scale the 1280px-based overflow to the actual window width.
        GameConsole.TEXT_OVERFLOW = int(
            GameConsole.TEXT_OVERFLOW * float(game.width) / 1280.0)
        self.messages = []
        self.game = game
        self.hidden = True
        self.env = self
        self.stream = ""
        self.entry = ""
        self._everyOtherLine = -1
        self._entrySurface = None
        self._entryRect = None
        self._consoleSurface = pygame.Surface(
            (GameConsole.DARKEN_WIDTH*game.width, game.height)
        )
        self._consoleSurface.fill((0,0,0))
        self._consoleSurface.set_alpha(200)
        self._consoleSurface = self._consoleSurface.convert_alpha()
        self.font = pygame.freetype.Font("font/consola.ttf",
                                         ptsize = 12)
        self._renderEntry()
        # This console doubles as the stream for a logging.StreamHandler so
        # log records arrive via write()/flush().
        handler = logging.StreamHandler(self)
        handler.setLevel(level)
        formatter = logging.Formatter(
            "%(levelname)s ; %(name)s ; %(message)s")
        handler.setFormatter(formatter)
        rootLogger.addHandler(handler)
        self.resetConfiguration()
        for blacklistedSource in GameConsole.blacklistedSources:
            self.blacklistSource(blacklistedSource)
    def sprite(self, spriteName):
        """ Return sprite from application registry """
        return self.game.app.reg(spriteName)
    def resetConfiguration(self):
        """ Load default console configuration. """
        # NOTE(review): exec()ing the config runs arbitrary code and leaves
        # the file object for the GC to close; confirm the cfg is trusted.
        exec(open("config/console.cfg", 'r').read())
    def blacklistSource(self, source):
        """ Prevent a logging source from logging to the console. """
        log.info("blacklisting " + source)
        if source not in GameConsole.blacklistedSources:
            GameConsole.blacklistedSources.append(source)
    def isSourceBlacklisted(self, source):
        """ Return whether a given logsource is not allowed to log to the console. """
        # A source is blacklisted when any dotted prefix of it is listed,
        # e.g. "R.Game.Physics" matches a blacklist entry "R.Game".
        components = source.split(".")
        i = 0
        for component in components:
            i += 1
            testing = components[:i]
            if ".".join(testing) in GameConsole.blacklistedSources:
                return True
        return False
    def isEnvironment(self, environment):
        # Deprecated!
        """ Return whether 'environment' is a suitable environ for the console. """
        return hasattr(environment, 'execute')
    isEnv = isEnvironment
    def getEnvironment(self):
        """ Return the object console commands execute against. """
        return self._environment
    def setEnvironment(self, environment):
        """ Set the execution environment, rejecting unsuitable objects. """
        if self.isEnv(environment):
            self._environment = environment
        else:
            log.error("(execute) " + str(environment) + " is not an environment.")
    env = property(getEnvironment, setEnvironment)
    environment = property(getEnvironment, setEnvironment)
    def resetEnvironment(self):
        """ Point the execution environment back at the console itself. """
        self.env = self
    def resetEnv(self): #shorthand
        self.env = self
    def runScript(self, script):
        """
        Run script from script directory.
        See console command guide for shortcut ($)
        """
        # Rebind self to the environment so the exec'd script sees it as
        # 'self' while 'c' stays bound to the console.
        c = self
        self = self.env
        exec(open("script/" + script).read())
    def execute(self, c, command):
        """ Execute a console command with 'c' as the GameConsole instance. """
        c = self # we only use 'c' in the execute function for compatibility with other environments!
        self = self.env
        log.info("(execute) " + command)
        # Command prefixes: "$name" runs script/name.py, leading '#' targets
        # the console itself, leading '?' prints the expression's value.
        try:
            if command[0] == "$":
                self.runScript(command[1:] + ".py")
            else:
                if command[0] == "#":
                    self = c
                    if command[1] == "?":
                        exec("print(" + command[2:] + ")")
                    else:
                        exec(command[1:])
                else:
                    if command[0] == "?":
                        exec("print(" + command[1:] + ")")
                    else:
                        exec(command)
        except:
            log.error("(execute) " + traceback.format_exc())
    def executeEntry(self):
        """ Execute the current entry line, then clear it. """
        self.execute(self, self.entry)
        self.entry = ""
        self._renderEntry()
    def hide(self):
        self.hidden = True
    def unhide(self):
        self.hidden = False
    def toggleHidden(self):
        self.hidden = not self.hidden
    def _renderEntry(self):
        """ Pre-render the entry line surface, anchored bottom-left. """
        surface, rect = self.font.render(self.entry, (255,255,255,255))
        rect.left = GameConsole.PADDING_LEFT
        rect.bottom = self.game.height - GameConsole.ENTRY_PADDING
        self._entrySurface = surface
        self._entryRect = rect
    def _renderFPS(self, fps):
        """ Pre-render the FPS counter surface, anchored top-left. """
        surface, rect = self.font.render(str(fps), (255,255,255,255))
        rect.left = GameConsole.PADDING_LEFT
        rect.top = GameConsole.PADDING_LEFT
        self._fpsSurface = surface
        self._fpsRect = rect
    def renderMessage(self, stream):
        """ Parse a 'LEVEL ; source ; message' stream into colored,
        word-wrapped surfaces appended to the message buffer. """
        #log.debug("!@ rendering message stream: " + stream)
        if self.game.quitting == False:
            try:
                levelname, source, message = stream.split(" ; ")
                if not self.isSourceBlacklisted(source):
                    color = {"DEBUG":(200,200,200,255),"INFO":(150,150,255,255),
                        "WARNING":(255,255,50,255),"ERROR":(255,50,50,255),
                        "CRITICAL":(255,20,255,255)}[levelname]
##                    if self._everyOtherLine < 0:
##                        #log.debug("!@ EVERY OTHER LINE")
##                        color = lightenColor(color, 80)
##                        self._everyOtherLine = -self._everyOtherLine
                    multiline = message.split("\n")
                    newMultiline = []
                    for line in multiline:
                        if len(line) >= self.TEXT_OVERFLOW:
                            newMultiline += splitLine(line, self.TEXT_OVERFLOW)
                        else:
                            newMultiline += [line]
                    multiline = newMultiline
                    # First line carries the padded source name; continuation
                    # lines are indented to align under it.
                    multiline[0] = source + " " * (self.LOGSOURCE_SPACING - len(source)) + multiline[0]
                    i = 0
                    for line in multiline[1:]:
                        i += 1
                        multiline[i] = " "*self.LOGSOURCE_SPACING + multiline[i]
                    for msg in multiline:
                        surface, rect = self.font.render(msg, color)
                        self.messages.append([surface,rect])
                        if len(self.messages) > self.MESSAGE_BUFFER_LENGTH:
                            self.messages = self.messages[1:]
                    self._recalculateCoordinates()
            except:
                log.error("!@ error rendering last stream" +\
                          traceback.format_exc())
    def _recalculateCoordinates(self):
        """ Re-anchor every buffered message rect from its buffer position
        and the current scroll offset. """
        i = len(self.messages)
        for message in self.messages:
            i -= 1
            message[1].top = self.game.height - \
                             GameConsole.MESSAGE_HEIGHT * (i + self.scrollOffset) - \
                             GameConsole.CONSOLE_PADDING
            message[1].left = GameConsole.PADDING_LEFT
    def draw(self, canvas):
        """ Blit background, entry line, visible messages and FPS counter. """
        canvas.blit(self._consoleSurface, (0,0))
        canvas.blit(self._entrySurface, self._entryRect)
        for message in self.messages:
            # Only draw messages inside the scrollable viewport.
            if message[1].top > GameConsole.CONSOLE_PADDING and\
            message[1].bottom < self.game.height - GameConsole.CONSOLE_PADDING:
                canvas.blit(message[0], message[1])
        canvas.blit(self._fpsSurface, self._fpsRect)
    def entryAdd(self, unicode):
        """ Append typed character(s) to the entry line. """
        self.entry += unicode
        self._renderEntry()
    def entryBackspace(self):
        """ Delete the last character of the entry line. """
        self.entry = self.entry[:-1]
        self._renderEntry()
    def scrollUp(self, messages=1):
        """ Move one message upwards through the Console buffer. """
        self.scrollOffset -= messages
        self._recalculateCoordinates()
    def scrollDown(self, messages=1):
        """ Move one message downwards through the Console buffer. """
        self.scrollOffset += messages
        self._recalculateCoordinates()
    def update(self, dt):
        """ Refresh the FPS display at most every _fpsUpdateDelay ms. """
        self._fpsUpdateWait -= dt
        if self._fpsUpdateWait <= 0.0:
            self._renderFPS(int(self.game.clock.get_fps()))
            self._fpsUpdateWait = self._fpsUpdateDelay
    def write(self, data):
        """ logging.StreamHandler sink: accumulate data into the stream. """
        try:
            self.stream += str(data)
        except:
            log.critical("!@ " + traceback.format_exc())
    def flush(self):
        """ Render the accumulated stream (unless tagged '!@ ') and reset. """
        try:
            # ANYTHING you don't want to render to the
            # game console, precede with these symbols:
            # "!@ " (not including quotes)
            if "!@ " not in self.stream:
                self.renderMessage(self.stream[:-1])
            self.stream = ""
        except:
            log.critical("!@ " + traceback.format_exc())
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Definitions shared by multiple clients."""
from collections import namedtuple
import sys
from google.protobuf.descriptor import FieldDescriptor
import gmusicapi
from gmusicapi.compat import json
from gmusicapi.exceptions import (
CallFailure, ParseException, ValidationException,
)
from gmusicapi.utils import utils
import requests
log = utils.DynamicClientLogger(__name__)
_auth_names = ('xt', 'sso', 'oauth')

"""
AuthTypes has fields for each type of auth, each of which store a bool:
xt: webclient xsrf param/cookie
sso: webclient Authorization header
oauth: musicmanager oauth header
"""

AuthTypes = namedtuple('AuthTypes', _auth_names)


def authtypes(**kwargs):
    """Convenience factory for AuthTypes that defaults auth flags to False."""
    flags = dict.fromkeys(_auth_names, False)
    flags.update(kwargs)
    return AuthTypes(**flags)
class BuildRequestMeta(type):
    """Metaclass to create build_request from static/dynamic config.

    For every requests.Request kwarg it looks up ``static_<key>`` (a value)
    and/or ``dynamic_<key>`` (a callable) on the class being built and
    synthesizes a ``build_request`` classmethod that assembles the final
    kwargs dict; for 'headers'/'params' static and dynamic are merged.
    """
    def __new__(cls, name, bases, dct):
        #To not mess with mro and inheritance, build the class first.
        new_cls = super(BuildRequestMeta, cls).__new__(cls, name, bases, dct)
        merge_keys = ('headers', 'params')
        all_keys = ('method', 'url', 'files', 'data', 'verify') + merge_keys
        config = {}  # stores key: val for static or f(*args, **kwargs) -> val for dyn
        dyn = lambda key: 'dynamic_' + key
        stat = lambda key: 'static_' + key
        has_key = lambda key: hasattr(new_cls, key)
        get_key = lambda key: getattr(new_cls, key)
        for key in all_keys:
            if not has_key(dyn(key)) and not has_key(stat(key)):
                continue  # this key will be ignored; requests will default it
            # dynamic takes precedence over static when both exist
            if has_key(dyn(key)):
                config[key] = get_key(dyn(key))
            else:
                config[key] = get_key(stat(key))
        for key in merge_keys:
            #merge case: dyn took precedence above, but stat also exists
            if has_key(dyn(key)) and has_key(stat(key)):
                # default args freeze the current key's values into the closure
                def key_closure(stat_val=get_key(stat(key)), dyn_func=get_key(dyn(key))):
                    def build_key(*args, **kwargs):
                        # NOTE(review): update() mutates the shared static
                        # dict in place, so dynamic values from one call
                        # persist into later calls -- confirm intended.
                        dyn_val = dyn_func(*args, **kwargs)
                        stat_val.update(dyn_val)
                        return stat_val
                    return build_key
                config[key] = key_closure()
        #To explain some of the funkiness wrt closures, see:
        # http://stackoverflow.com/questions/233673/lexical-closures-in-python
        #create the actual build_request method
        def req_closure(config=config):
            def build_request(cls, *args, **kwargs):
                req_kwargs = {}
                for key, val in config.items():
                    # callables are dynamic entries; evaluate them now
                    if hasattr(val, '__call__'):
                        val = val(*args, **kwargs)
                    req_kwargs[key] = val
                return req_kwargs
            #return Request(**req_kwargs)
            return build_request
        new_cls.build_request = classmethod(req_closure())
        return new_cls
class Call(object):
"""
Clients should use Call.perform().
Calls define how to build their requests through static and dynamic data.
For example, a request might always send some user-agent: this is static.
Or, it might need the name of a song to modify: this is dynamic.
Specially named fields define the data, and correspond with requests.Request kwargs:
method: eg 'GET' or 'POST'
url: string
files: dictionary of {filename: fileobject} files to multipart upload.
data: the body of the request
If a dictionary is provided, form-encoding will take place.
A string will be sent as-is.
verify: if True, verify SSL certs
params (m): dictionary of URL parameters to append to the URL.
headers (m): dictionary
    Static data should prepend static_ to a field:
class SomeCall(Call):
static_url = 'http://foo.com/thiscall'
And dynamic data prepends dynamic_ to a method:
class SomeCall(Call):
#*args, **kwargs are passed from SomeCall.build_request (and Call.perform)
def dynamic_url(endpoint):
return 'http://foo.com/' + endpoint
Dynamic data takes precedence over static if both exist,
except for attributes marked with (m) above. These get merged, with dynamic overriding
on key conflicts (though all this really shouldn't be relied on).
Here's a contrived example that merges static and dynamic headers:
class SomeCall(Call):
static_headers = {'user-agent': "I'm totally a Google client!"}
@classmethod
def dynamic_headers(cls, keep_alive=False):
return {'Connection': keep_alive}
If neither a static nor dynamic member is defined,
the param is not used to create the requests.Request.
Calls declare the kind of auth they require with an AuthTypes object named required_auth.
Calls must define parse_response.
Calls can also define filter_response, validate and check_success.
Calls are organized semantically, so one endpoint might have multiple calls.
"""
__metaclass__ = BuildRequestMeta
gets_logged = True
required_auth = authtypes() # all false by default
    @classmethod
    def parse_response(cls, response):
        """Parses a requests.Response to data.

        Subclasses must override; the base implementation always raises.
        """
        raise NotImplementedError
    @classmethod
    def validate(cls, response, msg):
        """Raise ValidationException on problems.

        :param response: a requests.Response
        :param msg: the result of parse_response on response
        """
        pass  # default: no validation performed
    @classmethod
    def check_success(cls, response, msg):
        """Raise CallFailure on problems.

        The base implementation treats every response as successful;
        subclasses override to inspect status codes or payload flags.

        :param response: a requests.Response
        :param msg: the result of parse_response on response
        """
        pass
    @classmethod
    def filter_response(cls, msg):
        """Return a version of a parsed response appropriate for logging.

        The base implementation filters nothing; subclasses override to
        scrub fields (eg tokens or byte blobs) before the message is logged.
        """
        return msg  # default to identity
    @classmethod
    def perform(cls, session, *args, **kwargs):
        """Send, parse, validate and check success of this call.

        *args and **kwargs are passed to protocol.build_transaction.

        :param session: a PlaySession used to send this request.
        :returns: the parsed response (whatever cls.parse_response yields).
        :raises CallFailure: on a non-2xx status code, an unparseable body,
            or when check_success rejects the parsed message.
        """
        #TODO link up these docs

        call_name = cls.__name__

        # Log (truncated) arguments unless this call opted out, eg because
        # the arguments contain credentials.
        if cls.gets_logged:
            log.debug("%s(args=%s, kwargs=%s)",
                      call_name,
                      [utils.truncate(a) for a in args],
                      dict((k, utils.truncate(v)) for (k, v) in kwargs.items())
                      )
        else:
            log.debug("%s(<omitted>)", call_name)

        req_kwargs = cls.build_request(*args, **kwargs)

        response = session.send(req_kwargs, cls.required_auth)

        #TODO trim the logged response if it's huge?

        # check response code
        try:
            response.raise_for_status()
        except requests.HTTPError as e:
            err_msg = str(e)

            if cls.gets_logged:
                err_msg += "\n(response was: %r)" % response.content

            raise CallFailure(err_msg, call_name)

        try:
            parsed_response = cls.parse_response(response)
        except ParseException:
            err_msg = ("the server's response could not be understood."
                       " The call may still have succeeded, but it's unlikely.")
            if cls.gets_logged:
                err_msg += "\n(response was: %r)" % response.content
                log.exception("could not parse %s response: %r", call_name, response.content)
            else:
                log.exception("could not parse %s response: (omitted)", call_name)

            raise CallFailure(err_msg, call_name)

        if cls.gets_logged:
            log.debug(cls.filter_response(parsed_response))

        try:
            #order is important; validate only has a schema for a successful response
            cls.check_success(response, parsed_response)
            cls.validate(response, parsed_response)
        except CallFailure:
            raise
        except ValidationException as e:
            #TODO shouldn't be using formatting
            err_msg = "the response format for %s was not recognized." % call_name
            err_msg += "\n\n%s\n" % e

            if cls.gets_logged:
                raw_response = response.content

                # NOTE(review): response.content is bytes on Python 3, so the
                # str concatenation below assumes Python 2 -- confirm before
                # porting.
                if len(raw_response) > 1000:
                    raw_response = raw_response[:1000] + '...'

                err_msg += ("\nFirst, try the develop branch."
                            " If you can recreate this error with the most recent code"
                            " please [create an issue](http://goo.gl/qbAW8) that includes"
                            " the above ValidationException"
                            " and the following raw response:\n%r\n"
                            "\nA traceback follows:\n") % raw_response

            # A validation failure is logged but deliberately not raised:
            # the parsed response is still returned to the caller.
            log.exception(err_msg)

        return parsed_response
@staticmethod
def _parse_json(text):
try:
return json.loads(text)
except ValueError as e:
trace = sys.exc_info()[2]
raise ParseException(str(e)), None, trace
    @staticmethod
    def _filter_proto(msg, make_copy=True):
        """Filter all byte fields in the message and submessages.

        Byte fields are replaced with a '<n bytes>' placeholder so protobuf
        responses can be logged without dumping raw binary data.

        :param msg: a protobuf message.
        :param make_copy: when True, work on a copy so *msg* is unmodified;
            recursive calls pass False to filter nested messages in place.
        :returns: the filtered message (the copy, or *msg* itself).
        """
        filtered = msg
        if make_copy:
            filtered = msg.__class__()
            filtered.CopyFrom(msg)

        fields = filtered.ListFields()

        #eg of filtering a specific field
        #if any(fd.name == 'field_name' for fd, val in fields):
        #    filtered.field_name = '<name>'

        #Filter all byte fields.
        for field_name, val in ((fd.name, val) for fd, val in fields
                                if fd.type == FieldDescriptor.TYPE_BYTES):
            setattr(filtered, field_name, "<%s bytes>" % len(val))

        #Filter submessages.
        for field in (val for fd, val in fields
                      if fd.type == FieldDescriptor.TYPE_MESSAGE):

            #protobuf repeated api is bad for reflection
            is_repeated = hasattr(field, '__len__')

            if not is_repeated:
                Call._filter_proto(field, make_copy=False)

            else:
                # NOTE(review): the loop index is unused and each iteration
                # rebuilds the entire repeated field, so the rebuild is
                # repeated len(field) times -- confirm whether a single pass
                # was intended.
                for i in range(len(field)):
                    #repeatedComposite does not allow setting
                    old_fields = [f for f in field]
                    del field[:]

                    # make_copy=False filters in place and returns the same
                    # objects, so extend() re-adds the now-filtered originals.
                    field.extend([Call._filter_proto(f, make_copy=False)
                                  for f in old_fields])

        return filtered
class ClientLogin(Call):
    """Performs `Google ClientLogin
    <https://developers.google.com/accounts/docs/AuthForInstalledApps#ClientLogin>`__."""

    # Responses carry auth tokens (SID/LSID/Auth); never log them.
    gets_logged = False

    static_method = 'POST'
    #static_headers = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1'}
    static_url = 'https://www.google.com/accounts/ClientLogin'

    @classmethod
    def dynamic_data(cls, Email, Passwd, accountType='HOSTED_OR_GOOGLE',
                     service='sj', source=None,
                     logintoken=None, logincaptcha=None):
        """Params align with those in the actual request.

        If *source* is ``None``, ``'gmusicapi-<version>'`` is used.

        Captcha requests are not yet implemented.

        :raises ValueError: if logintoken or logincaptcha is given.
        """
        if logintoken is not None or logincaptcha is not None:
            raise ValueError('ClientLogin captcha handling is not yet implemented.')

        if source is None:
            source = 'gmusicapi-' + gmusicapi.__version__

        # Pick the request params straight out of locals() by name.
        return dict(
            (name, val) for (name, val) in locals().items()
            if name in set(('Email', 'Passwd', 'accountType', 'service', 'source',
                            'logintoken', 'logincaptcha'))
        )

    @classmethod
    def parse_response(cls, response):
        """Return a dictionary of response key/vals.

        A successful login will have SID, LSID, and Auth keys.
        """
        # responses are formatted as, eg:
        #    SID=DQAAAGgA...7Zg8CTN
        #    LSID=DQAAAGsA...lk8BBbG
        #    Auth=DQAAAGgA...dk3fA5N
        # or:
        #    Url=http://www.google.com/login/captcha
        #    Error=CaptchaRequired
        #    CaptchaToken=DQAAAGgA...dkI1LK9
        #    CaptchaUrl=Captcha?ctoken=HiteT...
        ret = {}
        for line in response.text.split('\n'):
            if '=' in line:
                var, val = line.split('=', 1)
                ret[var] = val

        return ret

    @classmethod
    def check_success(cls, response, msg):
        """Raise CallFailure unless the server returned HTTP 200.

        Fixes two defects in the original: the hook was misspelled
        ``check_succes`` (so Call.perform never invoked it), and the
        status test was inverted -- it raised on a 200 response instead
        of on anything else.
        """
        if response.status_code != 200:
            raise CallFailure("status code %s != 200" % response.status_code,
                              cls.__name__)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.kfac.layer_collection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kfac.python.ops import fisher_factors
from tensorflow.contrib.kfac.python.ops import layer_collection
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MockFisherBlock(object):
  """A fake FisherBlock.

  Stands in for a real FisherBlock in LayerCollection tests: instances
  compare equal (and hash alike) exactly when their names match.
  """

  # Fixed minibatch count so get_use_count_map() sums are predictable.
  num_registered_minibatches = 2

  def __init__(self, name='MockFisherBlock'):
    self.name = name

  def __eq__(self, other):
    # Only other MockFisherBlocks can compare equal.
    if not isinstance(other, MockFisherBlock):
      return False
    return self.name == other.name

  def __hash__(self):
    # Consistent with __eq__: equal names give equal hashes.
    return hash(self.name)
class LayerParametersDictTest(test.TestCase):
  """Tests for layer_collection.LayerParametersDict key handling."""

  def testSetItem(self):
    """Ensure insertion, contains, retrieval works for supported key types."""
    with ops.Graph().as_default():
      lp_dict = layer_collection.LayerParametersDict()

      x = array_ops.constant(0)
      y0 = array_ops.constant(0)
      y1 = array_ops.constant(0)
      z0 = array_ops.constant(0)
      z1 = array_ops.constant(0)
      # A key may be a single tensor, a tuple, or a list of tensors.
      keys = [x, (y0, y1), [z0, z1]]
      for key in keys:
        lp_dict[key] = key

      for key in keys:
        self.assertTrue(key in lp_dict)
        self.assertEqual(lp_dict[key], key)

  def testSetItemOverlap(self):
    """Ensure insertion fails if key overlaps with existing key."""
    with ops.Graph().as_default():
      lp_dict = layer_collection.LayerParametersDict()
      x = array_ops.constant(0)
      y = array_ops.constant(0)
      lp_dict[x] = 'value'
      # (x, y) shares x with the existing key and must be rejected.
      with self.assertRaises(ValueError):
        lp_dict[(x, y)] = 'value'

      # Ensure 'y' wasn't inserted.
      self.assertTrue(x in lp_dict)
      self.assertFalse(y in lp_dict)
class LayerCollectionTest(test.TestCase):
  """Tests for layer_collection.LayerCollection registration and losses.

  Several tests assert exact float losses and seed-dependent sampled
  values, so op construction order inside each graph matters.
  """

  def testLayerCollectionInit(self):
    """A fresh LayerCollection has no blocks, factors, or losses."""
    lc = layer_collection.LayerCollection()
    self.assertEqual(0, len(lc.get_blocks()))
    self.assertEqual(0, len(lc.get_factors()))
    self.assertFalse(lc.losses)

  def testRegisterBlocks(self):
    """Each register_* call adds exactly one FisherBlock."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(200)
      lc = layer_collection.LayerCollection()
      lc.register_fully_connected(
          array_ops.constant(1), array_ops.constant(2), array_ops.constant(3))
      lc.register_fully_connected(
          array_ops.constant(1),
          array_ops.constant(2),
          array_ops.constant(3),
          approx=layer_collection.APPROX_DIAGONAL_NAME)
      lc.register_conv2d(
          array_ops.constant(4), [1, 1, 1, 1], 'SAME',
          array_ops.ones((1, 1, 1, 1)), array_ops.constant(3))
      lc.register_conv2d(
          array_ops.constant(4), [1, 1, 1, 1], 'SAME',
          array_ops.ones((1, 1, 1, 1)), array_ops.constant(3),
          approx=layer_collection.APPROX_DIAGONAL_NAME)
      lc.register_generic(
          array_ops.constant(5), 16, approx=layer_collection.APPROX_FULL_NAME)
      lc.register_generic(
          array_ops.constant(6),
          16,
          approx=layer_collection.APPROX_DIAGONAL_NAME)

      # Six registrations above -> six blocks.
      self.assertEqual(6, len(lc.get_blocks()))

  def testRegisterBlocksMultipleRegistrations(self):
    """Registering the same parameter twice raises ValueError."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(200)
      lc = layer_collection.LayerCollection()
      key = array_ops.constant(1)
      lc.register_fully_connected(key,
                                  array_ops.constant(2), array_ops.constant(3))
      with self.assertRaises(ValueError):
        lc.register_generic(key, 16)

  def testRegisterSingleParamNotRegistered(self):
    """A param absent from fisher_blocks can be registered."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {
        variable_scope.get_variable('y', initializer=array_ops.constant(1,)):
            '1'
    }
    lc.register_block(x, 'foo')

  def testShouldRegisterSingleParamRegistered(self):
    """Re-registering an already-registered param raises ValueError."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {x: '1'}
    with self.assertRaises(ValueError):
      lc.register_block(x, 'foo')

  def testRegisterSingleParamRegisteredInTuple(self):
    """A param already covered by a tuple key is silently absorbed."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    y = variable_scope.get_variable('y', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {(x, y): '1'}
    lc.register_block(x, 'foo')
    # 'foo' is not added; the existing tuple block keeps covering x.
    self.assertEqual(set(['1']), set(lc.get_blocks()))

  def testRegisterTupleParamNotRegistered(self):
    """A tuple key with no overlap registers normally."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    y = variable_scope.get_variable('y', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {
        variable_scope.get_variable('z', initializer=array_ops.constant(1,)):
            '1'
    }

    lc.register_block((x, y), 'foo')
    self.assertEqual(set(['1', 'foo']), set(lc.get_blocks()))

  def testRegisterTupleParamRegistered(self):
    """Re-registering an identical tuple key raises ValueError."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    y = variable_scope.get_variable('y', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {(x, y): '1'}

    with self.assertRaises(ValueError):
      lc.register_block((x, y), 'foo')

  def testRegisterTupleParamRegisteredInSuperset(self):
    """A tuple covered by an existing superset key is absorbed."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    y = variable_scope.get_variable('y', initializer=array_ops.constant(1,))
    z = variable_scope.get_variable('z', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {(x, y, z): '1'}

    lc.register_block((x, y), 'foo')
    self.assertEqual(set(['1']), set(lc.get_blocks()))

  def testRegisterTupleParamSomeRegistered(self):
    """A new tuple block replaces single-param blocks it subsumes."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    y = variable_scope.get_variable('y', initializer=array_ops.constant(1,))
    z = variable_scope.get_variable('z', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {x: MockFisherBlock('1'), z: MockFisherBlock('2')}

    lc.register_block((x, y), MockFisherBlock('foo'))
    # Block '1' (for x alone) is replaced by 'foo'; '2' (for z) survives.
    self.assertEqual(
        set([MockFisherBlock('2'), MockFisherBlock('foo')]),
        set(lc.get_blocks()))

  def testRegisterTupleVarSomeRegisteredInOtherTuples(self):
    """Partial overlap with existing tuple keys raises ValueError."""
    x = variable_scope.get_variable('x', initializer=array_ops.constant(1,))
    y = variable_scope.get_variable('y', initializer=array_ops.constant(1,))
    z = variable_scope.get_variable('z', initializer=array_ops.constant(1,))
    w = variable_scope.get_variable('w', initializer=array_ops.constant(1,))
    lc = layer_collection.LayerCollection()
    lc.fisher_blocks = {(x, z): '1', (z, w): '2'}

    with self.assertRaises(ValueError):
      lc.register_block((x, y), 'foo')

  def testRegisterCategoricalPredictiveDistribution(self):
    """Registering the same loss twice doubles the sampled loss."""
    with ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      logits = linalg_ops.eye(2)

      lc = layer_collection.LayerCollection()
      lc.register_categorical_predictive_distribution(logits, seed=200)
      single_loss = sess.run(lc.total_sampled_loss())

      lc2 = layer_collection.LayerCollection()
      lc2.register_categorical_predictive_distribution(logits, seed=200)
      lc2.register_categorical_predictive_distribution(logits, seed=200)
      double_loss = sess.run(lc2.total_sampled_loss())

      self.assertAlmostEqual(2 * single_loss, double_loss)

  def testLossFunctionByName(self):
    """Ensure loss functions can be identified by name."""
    with ops.Graph().as_default():
      logits = linalg_ops.eye(2)
      lc = layer_collection.LayerCollection()

      # Create a new loss function by name.
      lc.register_categorical_predictive_distribution(logits, name='loss1')
      self.assertEqual(1, len(lc.losses))

      # Add logits to same loss function.
      lc.register_categorical_predictive_distribution(
          logits, name='loss1', reuse=True)
      self.assertEqual(1, len(lc.losses))

      # Add another new loss function.
      lc.register_categorical_predictive_distribution(logits, name='loss2')
      self.assertEqual(2, len(lc.losses))

  def testLossFunctionWithoutName(self):
    """Ensure loss functions get unique names if 'name' not specified."""
    with ops.Graph().as_default():
      logits = linalg_ops.eye(2)
      lc = layer_collection.LayerCollection()

      # Create a new loss function with default names.
      lc.register_categorical_predictive_distribution(logits)
      lc.register_categorical_predictive_distribution(logits)
      self.assertEqual(2, len(lc.losses))

  def testCategoricalPredictiveDistributionMultipleMinibatches(self):
    """Ensure multiple minibatches are registered."""
    with ops.Graph().as_default():
      batch_size = 3
      output_size = 2
      logits = array_ops.zeros([batch_size, output_size])
      targets = array_ops.ones([batch_size], dtype=dtypes.int32)
      lc = layer_collection.LayerCollection()

      # Create a new loss function.
      lc.register_categorical_predictive_distribution(
          logits, targets=targets, name='loss1')

      # Can add when reuse=True
      lc.register_categorical_predictive_distribution(
          logits, targets=targets, name='loss1', reuse=True)

      # Can add when reuse=VARIABLE_SCOPE and reuse=True there.
      with variable_scope.variable_scope(
          variable_scope.get_variable_scope(), reuse=True):
        lc.register_categorical_predictive_distribution(
            logits,
            targets=targets,
            name='loss1',
            reuse=layer_collection.VARIABLE_SCOPE)

      # Can't add when reuse=False
      with self.assertRaises(KeyError):
        lc.register_categorical_predictive_distribution(
            logits, targets=targets, name='loss1', reuse=False)

      # Can't add when reuse=VARIABLE_SCOPE and reuse=False there.
      with self.assertRaises(KeyError):
        lc.register_categorical_predictive_distribution(
            logits,
            targets=targets,
            name='loss1',
            reuse=layer_collection.VARIABLE_SCOPE)

      self.assertEqual(len(lc.losses), 1)
      loss = lc.losses[0]

      # Three successful registrations.
      self.assertEqual(loss.params.shape.as_list(),
                       [3 * batch_size, output_size])
      self.assertEqual(loss.targets.shape.as_list(), [3 * batch_size])

  def testRegisterCategoricalPredictiveDistributionBatchSize1(self):
    """Registration succeeds with a batch of one."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(200)
      logits = random_ops.random_normal((1, 2))
      lc = layer_collection.LayerCollection()

      lc.register_categorical_predictive_distribution(logits, seed=200)

  def testRegisterCategoricalPredictiveDistributionSpecifiedTargets(self):
    """With explicit targets, total_loss matches a known value."""
    with ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      logits = array_ops.constant([[1., 2.], [3., 4.]], dtype=dtypes.float32)
      lc = layer_collection.LayerCollection()
      targets = array_ops.constant([0, 1], dtype=dtypes.int32)

      lc.register_categorical_predictive_distribution(logits, targets=targets)
      single_loss = sess.run(lc.total_loss())
      # Expected cross-entropy for these fixed logits/targets.
      self.assertAlmostEqual(1.6265233, single_loss)

  def testRegisterNormalPredictiveDistribution(self):
    """Registering the same normal loss twice doubles the sampled loss."""
    with ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      predictions = array_ops.constant(
          [[1., 2.], [3., 4]], dtype=dtypes.float32)

      lc = layer_collection.LayerCollection()
      lc.register_normal_predictive_distribution(predictions, 1., seed=200)
      single_loss = sess.run(lc.total_sampled_loss())

      lc2 = layer_collection.LayerCollection()
      lc2.register_normal_predictive_distribution(predictions, 1., seed=200)
      lc2.register_normal_predictive_distribution(predictions, 1., seed=200)
      double_loss = sess.run(lc2.total_sampled_loss())

      self.assertAlmostEqual(2 * single_loss, double_loss)

  def testRegisterNormalPredictiveDistributionSpecifiedTargets(self):
    """With explicit targets and variance, total_loss matches a known value."""
    with ops.Graph().as_default(), self.test_session() as sess:
      random_seed.set_random_seed(200)
      predictions = array_ops.constant(
          [[1., 2.], [3., 4.]], dtype=dtypes.float32)
      lc = layer_collection.LayerCollection()
      targets = array_ops.constant([[3., 1.], [4., 2.]], dtype=dtypes.float32)

      lc.register_normal_predictive_distribution(
          predictions, 2.**2, targets=targets)
      single_loss = sess.run(lc.total_loss())
      self.assertAlmostEqual(7.6983433, single_loss)

  def ensureLayerReuseWorks(self, register_fn):
    """Ensure the 'reuse' keyword argument function as intended.

    Args:
      register_fn: function for registering a layer. Arguments are
        layer_collection, reuse, and approx.
    """
    # Fails on second if reuse=False.
    lc = layer_collection.LayerCollection()
    register_fn(lc)
    with self.assertRaises(ValueError):
      register_fn(lc, reuse=False)

    # Succeeds on second if reuse=True.
    lc = layer_collection.LayerCollection()
    register_fn(lc)
    register_fn(lc, reuse=True)

    # Fails on second if reuse=VARIABLE_SCOPE and no variable reuse.
    lc = layer_collection.LayerCollection()
    register_fn(lc)
    with self.assertRaises(ValueError):
      register_fn(lc, reuse=layer_collection.VARIABLE_SCOPE)

    # Succeeds on second if reuse=VARIABLE_SCOPE and variable reuse.
    lc = layer_collection.LayerCollection()
    register_fn(lc)
    with variable_scope.variable_scope(
        variable_scope.get_variable_scope(), reuse=True):
      register_fn(lc, reuse=layer_collection.VARIABLE_SCOPE)

    # Fails if block type changes.
    lc = layer_collection.LayerCollection()
    register_fn(lc, approx=layer_collection.APPROX_KRONECKER_NAME)
    with self.assertRaises(ValueError):
      register_fn(lc, approx=layer_collection.APPROX_DIAGONAL_NAME, reuse=True)

    # Fails if reuse requested but no FisherBlock exists.
    lc = layer_collection.LayerCollection()
    with self.assertRaises(KeyError):
      register_fn(lc, reuse=True)

  def testRegisterFullyConnectedReuse(self):
    """Ensure the 'reuse' works with register_fully_connected."""
    with ops.Graph().as_default():
      inputs = array_ops.ones([2, 10])
      outputs = array_ops.zeros([2, 5])
      params = (
          variable_scope.get_variable('w', [10, 5]),  #
          variable_scope.get_variable('b', [5]))

      def register_fn(lc, **kwargs):
        lc.register_fully_connected(
            params=params, inputs=inputs, outputs=outputs, **kwargs)

      self.ensureLayerReuseWorks(register_fn)

  def testRegisterConv2dReuse(self):
    """Ensure the 'reuse' works with register_conv2d."""
    with ops.Graph().as_default():
      inputs = array_ops.ones([2, 5, 5, 10])
      outputs = array_ops.zeros([2, 5, 5, 3])
      params = (
          variable_scope.get_variable('w', [1, 1, 10, 3]),  #
          variable_scope.get_variable('b', [3]))

      def register_fn(lc, **kwargs):
        lc.register_conv2d(
            params=params,
            strides=[1, 1, 1, 1],
            padding='SAME',
            inputs=inputs,
            outputs=outputs,
            **kwargs)

      self.ensureLayerReuseWorks(register_fn)

  def testMakeOrGetFactor(self):
    """make_or_get_factor dedupes on args and scopes variables."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(200)
      lc = layer_collection.LayerCollection()
      key = array_ops.constant(1)

      lc.make_or_get_factor(fisher_factors.FullFactor, ((key,), 16))
      # Identical args -> the existing factor is reused, not duplicated.
      lc.make_or_get_factor(fisher_factors.FullFactor, ((key,), 16))
      lc.make_or_get_factor(fisher_factors.FullFactor,
                            ((array_ops.constant(2),), 16))

      self.assertEqual(2, len(lc.get_factors()))
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertTrue(
          all([var.name.startswith('LayerCollection') for var in variables]))

  def testMakeOrGetFactorCustomScope(self):
    """Factor variables live under the LayerCollection's custom name."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(200)
      scope = 'Foo'
      lc = layer_collection.LayerCollection(name=scope)
      key = array_ops.constant(1)

      lc.make_or_get_factor(fisher_factors.FullFactor, ((key,), 16))
      lc.make_or_get_factor(fisher_factors.FullFactor, ((key,), 16))
      lc.make_or_get_factor(fisher_factors.FullFactor,
                            ((array_ops.constant(2),), 16))

      self.assertEqual(2, len(lc.get_factors()))
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertTrue(all([var.name.startswith(scope) for var in variables]))

  def testGetUseCountMap(self):
    """Ensure get_use_count_map() sums 'num_registered_minibatches'."""
    lc = layer_collection.LayerCollection()
    # Each MockFisherBlock reports 2 minibatches; params in multiple keys
    # accumulate across blocks ('a' and 'c' appear twice -> 4).
    lc.fisher_blocks = {
        'a': MockFisherBlock(),
        ('a', 'c'): MockFisherBlock(),
        ('b', 'c'): MockFisherBlock()
    }
    use_count_map = lc.get_use_count_map()
    self.assertDictEqual({'a': 4, 'b': 2, 'c': 4}, use_count_map)
if __name__ == '__main__':
  # Run this module's test cases via the TensorFlow test runner.
  test.main()
|
|
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
import copy
import uuid
import dateutil.parser
from datetime import timedelta
from manager_rest.test.base_test import CLIENT_API_VERSION
from manager_rest import utils
from manager_rest.test import base_test
from cloudify_rest_client import exceptions
from cloudify_rest_client.deployment_modifications import (
DeploymentModification)
EXPECTS_SCALING_GROUPS = CLIENT_API_VERSION not in ['v1', 'v2']
class ModifyTests(base_test.BaseServerTestCase):
def test_data_model_with_finish(self):
def expected_after_end_func(_, before_end):
return before_end
self._test_data_model_impl(
end_func=self.client.deployment_modifications.finish,
expected_end_status=DeploymentModification.FINISHED,
expected_end_node_counts={
'num': 2, 'deploy_num': 1, 'planned_num': 2},
expected_before_end_func=lambda before_end: [],
expected_after_end_func=expected_after_end_func,
expected_after_end_count=3,
expected_after_end_runtime_property='after_start')
def test_data_model_with_rollback(self):
def expected_after_end_func(before_modification, _):
return before_modification
self._test_data_model_impl(
end_func=self.client.deployment_modifications.rollback,
expected_end_status=DeploymentModification.ROLLEDBACK,
expected_end_node_counts={
'num': 1, 'deploy_num': 1, 'planned_num': 1},
expected_before_end_func=lambda before_end: before_end,
expected_after_end_func=expected_after_end_func,
expected_after_end_count=2,
expected_after_end_runtime_property='before_start')
    def _test_data_model_impl(
            self,
            end_func,
            expected_end_status,
            expected_end_node_counts,
            expected_before_end_func,
            expected_after_end_func,
            expected_after_end_count,
            expected_after_end_runtime_property):
        """Drive a full modification lifecycle and verify its data model.

        Scales node1 from 1 to 2 instances, ends the modification with
        *end_func* (finish or rollback), then checks statuses, node
        counts, timestamps, stored context and runtime properties against
        the caller-supplied expectations.

        :param end_func: called with the modification id to end it.
        :param expected_end_status: DeploymentModification status after end.
        :param expected_end_node_counts: kwargs for the node count check.
        :param expected_before_end_func: maps the pre-end instance list to
            the expected ``before_rollback`` contents.
        :param expected_after_end_func: maps (before_modification,
            before_end) to the expected final instance list.
        :param expected_after_end_count: expected final instance count.
        :param expected_after_end_runtime_property: expected value of the
            'test' runtime property after the modification ends.
        """
        def node_assertions(num, deploy_num, planned_num):
            # Check the three instance counters tracked on the node.
            node = self.client.nodes.get(deployment.id, 'node1')
            self.assertEqual(node.number_of_instances, num)
            self.assertEqual(node.planned_number_of_instances, planned_num)
            self.assertEqual(node.deploy_number_of_instances, deploy_num)

        _, _, _, deployment = self.put_deployment(
            deployment_id='d{0}'.format(uuid.uuid4()),
            blueprint_file_name='modify1.yaml')

        node_assertions(num=1, deploy_num=1, planned_num=1)

        mock_context = {'some': 'data'}

        # Stamp both instances so runtime-property changes can be tracked
        # across the modification.
        node1_instance = self.client.node_instances.list(
            deployment_id=deployment.id, node_name='node1')[0]
        self.client.node_instances.update(
            node1_instance.id,
            runtime_properties={'test': 'before_start'},
            version=1)
        node2_instance = self.client.node_instances.list(
            deployment_id=deployment.id, node_name='node2')[0]
        self.client.node_instances.update(
            node2_instance.id,
            runtime_properties={'test': 'before_start'},
            version=1)

        before_modification = self._get_items(
            self.client.node_instances.list,
            deployment_id=deployment.id
        )
        modified_nodes = {'node1': {'instances': 2}}
        modification = self.client.deployment_modifications.start(
            deployment.id, nodes=modified_nodes, context=mock_context)
        self._fix_modification(modification)
        # The modification must snapshot the pre-modification instances.
        self._assert_instances_equal(
            modification.node_instances.before_modification,
            before_modification)
        self.assertIsNone(modification.ended_at)

        self.client.node_instances.update(
            node1_instance.id,
            runtime_properties={'test': 'after_start'},
            version=2)
        self.client.node_instances.update(
            node2_instance.id,
            runtime_properties={'test': 'after_start'},
            # version is 3 here because the modification increased it by 1
            version=3)

        # Only planned_num reflects the scale-up while still in progress.
        node_assertions(num=1, deploy_num=1, planned_num=2)

        modification_id = modification.id
        self.assertEqual(modification.status,
                         DeploymentModification.STARTED)

        before_end = self._get_items(self.client.node_instances.list,
                                     deployment_id=deployment.id)
        node1_instances = self.client.node_instances.list(
            deployment_id=deployment.id, node_name='node1')
        node2_instance1 = self.client.node_instances.list(
            deployment_id=deployment.id, node_name='node2')[0]
        # Existing instances keep their index; the new node1 instance
        # gets a distinct one.
        self.assertEqual(
            node1_instances[0]['index'],
            node1_instance['index'])
        self.assertEqual(
            node2_instance1['index'],
            node2_instance['index'])
        self.assertNotEqual(
            node1_instances[0]['index'],
            node1_instances[1]['index'])
        end_func(modification_id)
        after_end = self._get_items(self.client.node_instances.list,
                                    deployment_id=deployment.id)

        node_assertions(**expected_end_node_counts)

        modification = self.client.deployment_modifications.get(
            modification.id)
        self._fix_modification(modification)
        self.assertEqual(modification.id, modification_id)
        self.assertEqual(modification.status, expected_end_status)
        self.assertEqual(modification.deployment_id, deployment.id)
        self.assertEqual(modification.modified_nodes, modified_nodes)
        created_at = dateutil.parser.parse(modification.created_at)
        ended_at = dateutil.parser.parse(modification.ended_at)
        # created_at <= ended_at, both within a 5-second window of "now".
        self.assertTrue(
            dateutil.parser.parse(utils.get_formatted_timestamp()) -
            timedelta(seconds=5) <=
            created_at <= ended_at <=
            dateutil.parser.parse(utils.get_formatted_timestamp()))

        all_modifications = self._get_items(
            self.client.deployment_modifications.list
        )
        dep_modifications = self._get_items(
            self.client.deployment_modifications.list,
            deployment_id=deployment.id
        )
        for modification in all_modifications + dep_modifications:
            self._fix_modification(modification)
        self.assertEqual(len(dep_modifications), 1)
        self.assertEqual(dep_modifications[0], modification)
        self.assertEqual(all_modifications, dep_modifications)
        # Filtering on an unknown deployment returns nothing.
        modifications_list = self._get_items(
            self.client.deployment_modifications.list,
            deployment_id='i_really_should_not_exist'
        )
        self.assertEqual([], modifications_list)
        self._assert_instances_equal(
            modification.node_instances.before_modification,
            before_modification)
        self._assert_instances_equal(
            modification.node_instances.before_rollback,
            expected_before_end_func(before_end))
        self._assert_instances_equal(
            after_end,
            expected_after_end_func(before_modification, before_end))
        self.assertEqual(modification.context, mock_context)
        self.assertEqual(expected_after_end_count, len(after_end))
        self.assertEqual(
            self.client.node_instances.get(
                node1_instance.id).runtime_properties['test'],
            expected_after_end_runtime_property)
        self.assertEqual(
            self.client.node_instances.get(
                node2_instance.id).runtime_properties['test'],
            expected_after_end_runtime_property)
def _assert_instances_equal(self, instances1, instances2):
def sort_key(instance):
return instance['id']
self.assertEqual(sorted(instances1, key=sort_key),
sorted(instances2, key=sort_key))
@staticmethod
def _fix_modification(modification):
if not EXPECTS_SCALING_GROUPS:
for node_instances in modification.node_instances.values():
for node_instance in node_instances:
node_instance.pop('scaling_groups', None)
def _get_items(self, list_func, *args, **kwargs):
if CLIENT_API_VERSION != 'v1':
items = list_func(*args, **kwargs).items
else:
items = list_func(*args, **kwargs)
return items
    def test_no_concurrent_modifications(self):
        """Only one modification per deployment may be in progress."""
        blueprint_id, _, _, deployment = self.put_deployment(
            deployment_id='d{0}'.format(uuid.uuid4()),
            blueprint_file_name='modify1.yaml')
        # A second deployment of the same blueprint, to show the lock is
        # per-deployment rather than global.
        deployment2 = self.client.deployments.create(
            blueprint_id=blueprint_id,
            deployment_id='d{0}'.format(uuid.uuid4()))
        self.create_deployment_environment(deployment=deployment2)

        modification = self.client.deployment_modifications.start(
            deployment.id, nodes={})

        # should not allow another deployment modification of the same
        # deployment to start
        with self.assertRaises(
                exceptions.ExistingStartedDeploymentModificationError) as e:
            self.client.deployment_modifications.start(deployment.id, nodes={})
        # The error message names the blocking modification.
        self.assertIn(modification.id, str(e.exception))

        # should allow another deployment modification of a different
        # deployment to start
        self.client.deployment_modifications.start(deployment2.id, nodes={})

        self.client.deployment_modifications.finish(modification.id)

        # should allow deployment modification to start after previous one
        # finished
        self.client.deployment_modifications.start(deployment.id, nodes={})
    def test_finish_and_rollback_on_ended_modification(self):
        """Neither finish nor rollback may be applied to an ended modification."""
        def test(end_function):
            # Fresh blueprint + deployment per sub-case.
            _, _, _, deployment = self.put_deployment(
                deployment_id='d{0}'.format(uuid.uuid4()),
                blueprint_id='b{0}'.format(uuid.uuid4()),
                blueprint_file_name='modify1.yaml')
            modification = self.client.deployment_modifications.start(
                deployment.id, nodes={})
            end_function(modification.id)
            # Once ended (by either path), both end operations must fail.
            with self.assertRaises(
                    exceptions.DeploymentModificationAlreadyEndedError):
                self.client.deployment_modifications.finish(modification.id)
            with self.assertRaises(
                    exceptions.DeploymentModificationAlreadyEndedError):
                self.client.deployment_modifications.rollback(modification.id)

        # Exercise both ways of ending a modification.
        test(self.client.deployment_modifications.finish)
        test(self.client.deployment_modifications.rollback)
    def test_finish_and_rollback_on_non_existent_modification(self):
        """Ending an unknown modification yields a 404 with a clear message."""
        with self.assertRaises(exceptions.CloudifyClientError) as scope:
            self.client.deployment_modifications.finish('what')
        self.assertEqual(scope.exception.status_code, 404)
        self.assertEqual(str(scope.exception),
                         '404: Requested `DeploymentModification` '
                         'with ID `what` was not found')
        with self.assertRaises(exceptions.CloudifyClientError) as scope:
            self.client.deployment_modifications.rollback('what')
        self.assertEqual(scope.exception.status_code, 404)
        self.assertEqual(str(scope.exception),
                         '404: Requested `DeploymentModification` '
                         'with ID `what` was not found')
def test_modify_add_instance(self):
    """Scale node1 from 1 to 2 instances: the added instance is visible as a
    node instance while the modification is active, but node counters only
    move on finish(); node2 ends up related to both node1 instances.
    """
    _, _, _, deployment = self.put_deployment(
        deployment_id='d{0}'.format(uuid.uuid4()),
        blueprint_file_name='modify1.yaml')
    node_instances1 = self.client.node_instances.list()
    self.assertEqual(2, len(node_instances1))
    self._assert_number_of_instances(deployment.id, 'node1', 1, 1)
    modified_nodes = {'node1': {'instances': 2}}
    modification = self.client.deployment_modifications.start(
        deployment.id, nodes=modified_nodes)
    # Starting the modification must not change node counters yet.
    self._assert_number_of_instances(deployment.id, 'node1', 1, 1)
    node_instances2 = self.client.node_instances.list()
    self.assertEqual(3, len(node_instances2))
    initial_instance_ids = [i2.id for i2 in node_instances1]
    new_instances = [i for i in node_instances2
                     if i.id not in initial_instance_ids]
    old_instances = [i for i in node_instances2
                     if i.id in initial_instance_ids]
    self.assertEqual(1, len(new_instances))
    self.assertEqual(2, len(old_instances))
    new_instance = new_instances[0]
    self.assertEqual('node1', new_instance.node_id)
    # The pre-existing node2 instance is expected to have gained one
    # relationship targeting the new node1 instance, plus a version bump.
    expected_old_instances = copy.deepcopy(node_instances1)
    for instance in expected_old_instances:
        if instance.node_id == 'node2':
            current_relationship = instance.relationships[0]
            new_relationship = copy.deepcopy(current_relationship)
            new_relationship['target_id'] = new_instance.id
            instance.relationships.append(new_relationship)
            instance['version'] += 1
    self.assertEqual(sorted(old_instances, key=lambda _i: _i.id),
                     sorted(expected_old_instances, key=lambda _i: _i.id))
    added_and_related = modification.node_instances.added_and_related
    self.assertEqual(2, len(added_and_related))
    self.client.deployment_modifications.finish(modification.id)
    # finish() commits the new instance count (deploy count is unchanged).
    self._assert_number_of_instances(deployment.id, 'node1', 2, 1)
    node_instances3 = self.client.node_instances.list()
    self.assertEqual(3, len(node_instances3))
    node1_instance_ids = [i.id for i in node_instances3
                          if i.node_id == 'node1']
    node2_instance = [i for i in node_instances3
                      if i.node_id == 'node2'][0]
    node2_target_ids = [rel['target_id'] for rel
                        in node2_instance.relationships]
    # node2 must now be related to every node1 instance.
    self.assertEqual(set(node1_instance_ids), set(node2_target_ids))
def test_modify_remove_instance(self):
    """Scale node1 from 2 down to 1 instance: nothing is removed while the
    modification is active; after finish() one node1 instance is gone and
    node2 keeps a single relationship to the surviving instance.
    """
    _, _, _, deployment = self.put_deployment(
        deployment_id='d{0}'.format(uuid.uuid4()),
        blueprint_file_name='modify2.yaml')
    node_instances1 = self.client.node_instances.list()
    self.assertEqual(3, len(node_instances1))
    self._assert_number_of_instances(deployment.id, 'node1', 2, 2)
    modified_nodes = {'node1': {'instances': 1}}
    modification = self.client.deployment_modifications.start(
        deployment.id, nodes=modified_nodes)
    # Counters and instances are untouched until finish().
    self._assert_number_of_instances(deployment.id, 'node1', 2, 2)
    node_instances2 = self.client.node_instances.list()
    self.assertEqual(3, len(node_instances2))
    initial_instance_ids = [i2.id for i2 in node_instances1]
    new_instances = [i for i in node_instances2
                     if i.id not in initial_instance_ids]
    old_instances = [i for i in node_instances2
                     if i.id in initial_instance_ids]
    self.assertEqual(0, len(new_instances))
    self.assertEqual(3, len(old_instances))
    self.assertEqual(sorted(old_instances, key=lambda _i: _i.id),
                     sorted(node_instances1, key=lambda _i: _i.id))
    # One node1 instance scheduled for removal plus the related node2.
    removed_and_related = modification.node_instances.removed_and_related
    self.assertEqual(2, len(removed_and_related))
    self.client.deployment_modifications.finish(modification.id)
    self._assert_number_of_instances(deployment.id, 'node1', 1, 2)
    node_instances3 = self.client.node_instances.list()
    self.assertEqual(2, len(node_instances3))
    node1_instance_id = [i.id for i in node_instances3
                         if i.node_id == 'node1'][0]
    node2_instance = [i for i in node_instances3
                      if i.node_id == 'node2'][0]
    node2_target_ids = [rel['target_id'] for rel
                        in node2_instance.relationships]
    # The relationship to the removed instance must have been dropped.
    self.assertEqual(1, len(node2_target_ids))
    self.assertEqual(node1_instance_id, node2_target_ids[0])
def _assert_number_of_instances(self,
                                deployment_id, node_id,
                                expected_number_of_instances,
                                expected_deploy_number_of_instances):
    """Assert both instance counters of a node match the expected values."""
    node = self.client.nodes.get(deployment_id, node_id)
    actual = (node.deploy_number_of_instances, node.number_of_instances)
    expected = (expected_deploy_number_of_instances,
                expected_number_of_instances)
    self.assertEqual(expected, actual)
def test_scaling_groups_finish(self):
    """finish() commits the planned scaling-group size as current."""
    expectation = {'current': 2, 'planned': 2}
    self._test_scaling_groups(
        end_method=self.client.deployment_modifications.finish,
        end_expectation=expectation)
def test_scaling_groups_rollback(self):
    """rollback() restores the original scaling-group size."""
    expectation = {'current': 1, 'planned': 1}
    self._test_scaling_groups(
        end_method=self.client.deployment_modifications.rollback,
        end_expectation=expectation)
def _test_scaling_groups(self, end_method, end_expectation):
    """Scale scaling-group 'group' to 2, end the modification with
    *end_method* (finish or rollback) and verify the group's
    current/planned counters match *end_expectation*.
    """
    _, _, _, deployment = self.put_deployment(
        blueprint_file_name='modify3-scale-groups.yaml')

    def assert_deployment_instances(dep, current, planned):
        # Check the counters on an already-fetched deployment object.
        props = dep['scaling_groups']['group']['properties']
        self.assertEqual(current, props['current_instances'])
        self.assertEqual(planned, props['planned_instances'])

    def assert_instances(current, planned):
        # Test get and list deployments endpoints
        dep1 = self.client.deployments.get(deployment.id)
        dep2 = self.client.deployments.list()[0]
        for dep in [dep1, dep2]:
            assert_deployment_instances(dep, current, planned)

    assert_deployment_instances(deployment, current=1, planned=1)
    assert_instances(current=1, planned=1)
    modified_nodes = {'group': {'instances': 2}}
    modification = self.client.deployment_modifications.start(
        deployment.id, nodes=modified_nodes)
    # While the modification is active only the planned counter moves.
    assert_instances(current=1, planned=2)
    # verify node instances scaling groups are also updated for newly
    # added nodes
    node_instances = self.client.node_instances.list(
        deployment_id=deployment.id
    ).items
    self.assertEqual(2, len(node_instances))
    self.assertIsNotNone(node_instances[0]['index'])
    self.assertIsNotNone(node_instances[1]['index'])
    self.assertNotEqual(node_instances[0]['index'],
                        node_instances[1]['index'])
    for instance in node_instances:
        node_instance_scaling_groups = instance['scaling_groups']
        self.assertEqual(1, len(node_instance_scaling_groups))
        self.assertEqual('group', node_instance_scaling_groups[0]['name'])
    end_method(modification.id)
    assert_instances(**end_expectation)
    # Re-fetch before deleting; the fetched copy still carries the final
    # counters and is asserted on after the deployment is removed.
    deployment = self.client.deployments.get(deployment.id)
    self.client.deployments.delete(deployment.id)
    assert_deployment_instances(deployment, **end_expectation)
def test_relationship_order_of_related_nodes(self):
    """After scaling nodes 1, 2, 4 and 5 to two instances each, node6's
    relationship list must preserve the blueprint-declared target order
    (scaled targets appearing twice, the unscaled node3 once).
    """
    _, _, _, deployment = self.put_deployment(
        blueprint_file_name='modify4-relationship-order.yaml')
    modification = self.client.deployment_modifications.start(
        deployment_id=deployment.id,
        nodes={'node{}'.format(index): {'instances': 2}
               for index in [1, 2, 4, 5]})
    self.client.deployment_modifications.finish(modification.id)
    node6 = self.client.node_instances.list(node_id='node6')[0]
    # Expected (target node, relationship type) pairs, in order.
    expected = [
        ('node1', 'connected_to'),
        ('node1', 'connected_to'),
        ('node2', 'connected_to'),
        ('node2', 'connected_to'),
        ('node3', 'contained_in'),
        ('node4', 'connected_to'),
        ('node4', 'connected_to'),
        ('node5', 'connected_to'),
        ('node5', 'connected_to'),
    ]
    relationships = node6.relationships
    self.assertEqual(len(expected), len(relationships))
    for (target_name, tpe), relationship in zip(expected, relationships):
        self.assertDictContainsSubset({
            'target_name': target_name,
            'type': 'cloudify.relationships.{0}'.format(tpe)
        }, relationship)
|
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class Asset(IdentifiedObject):
    """Tangible resource of the utility, including power system equipment, cabinets, buildings, etc. For electrical network equipment, the role of the asset is defined through PowerSystemResource and its subclasses, defined mainly in the Wires model (refer to IEC61970-301 and model package IEC61970::Wires). Asset description places emphasis on the physical characteristics of the equipment fulfilling that role.
    """

    def __init__(self, corporateCode='', utcNumber='', initialCondition='', category='', serialNumber='', critical=False, application='', purchasePrice=0.0, initialLossOfLife=0.0, manufacturedDate='', installationDate='', lotNumber='', DocumentRoles=None, Mediums=None, ErpRecDeliveryItems=None, Ratings=None, ToAssetRoles=None, electronicAddress=None, FromAssetRoles=None, AssetFunctions=None, ErpItemMaster=None, ErpInventory=None, ReliabilityInfos=None, AssetInfo=None, WorkTask=None, acceptanceTest=None, ErpOrganisationRoles=None, ScheduledEvents=None, AssetContainer=None, AssetPropertyCurves=None, ChangeItems=None, ActivityRecords=None, Location=None, status=None, PowerSystemResources=None, FinancialInfo=None, Properties=None, Measurements=None, *args, **kw_args):
        """Initialises a new 'Asset' instance.

        @param corporateCode: Code for this type of asset.
        @param utcNumber: Uniquely tracked commodity (UTC) number.
        @param initialCondition: Condition of asset in inventory or at time of installation. Examples include new, rebuilt, overhaul required, other. Refer to inspection data for information on the most current condition of the asset.
        @param category: Extension mechanism to accommodate utility-specific categorisation of Asset and its subtypes, according to their corporate standards, practices, and existing IT systems (e.g., for management of assets, maintenance, work, outage, customers, etc.).
        @param serialNumber: Serial number of this asset.
        @param critical: True if asset is considered critical for some reason (for example, a pole with critical attachments).
        @param application: The way this particular asset is being used in this installation. For example, the application of a bushing when attached to a specific transformer winding would be one of the following: H1, H2, H3, H0, X1, X2, X3, X0, Y1, Y2, Y3, Y0.
        @param purchasePrice: Purchase price of asset.
        @param initialLossOfLife: Whenever an asset is reconditioned, percentage of expected life for the asset when it was new; zero for new devices.
        @param manufacturedDate: Date this asset was manufactured.
        @param installationDate: (if applicable) Date current installation was completed, which may not be the same as the in-service date. Asset may have been installed at other locations previously. Ignored if asset is (1) not currently installed (e.g., stored in a depot) or (2) not intended to be installed (e.g., vehicle, tool).
        @param lotNumber: Lot number for this asset. Even for the same model and version number, many assets are manufactured in lots.
        @param DocumentRoles:
        @param Mediums:
        @param ErpRecDeliveryItems:
        @param Ratings: UserAttributes used to specify ratings of this asset. Ratings also can be used to set the initial value of operational measurement limits. Use 'name' to specify what kind of rating it is (e.g., voltage, current), and 'value' attribute for the actual value and unit information of the rating.
        @param ToAssetRoles:
        @param electronicAddress: Electronic address.
        @param FromAssetRoles:
        @param AssetFunctions:
        @param ErpItemMaster:
        @param ErpInventory:
        @param ReliabilityInfos:
        @param AssetInfo: Data applicable to this asset.
        @param WorkTask:
        @param acceptanceTest: Information on acceptance test.
        @param ErpOrganisationRoles:
        @param ScheduledEvents:
        @param AssetContainer:
        @param AssetPropertyCurves:
        @param ChangeItems:
        @param ActivityRecords: All activity records created for this asset.
        @param Location: Location of this asset.
        @param status: Status of this asset.
        @param PowerSystemResources: All power system resources used to electrically model this asset. For example, transformer asset is electrically modelled with a transformer and its windings and tap changer.
        @param FinancialInfo:
        @param Properties: UserAttributes used to specify further properties of this asset. Use 'name' to specify what kind of property it is, and 'value.value' attribute for the actual value.
        @param Measurements:
        """
        #: Code for this type of asset.
        self.corporateCode = corporateCode

        #: Uniquely tracked commodity (UTC) number.
        self.utcNumber = utcNumber

        #: Condition of asset in inventory or at time of installation. Examples include new, rebuilt, overhaul required, other. Refer to inspection data for information on the most current condition of the asset.
        self.initialCondition = initialCondition

        #: Extension mechanism to accommodate utility-specific categorisation of Asset and its subtypes, according to their corporate standards, practices, and existing IT systems (e.g., for management of assets, maintenance, work, outage, customers, etc.).
        self.category = category

        #: Serial number of this asset.
        self.serialNumber = serialNumber

        #: True if asset is considered critical for some reason (for example, a pole with critical attachments).
        self.critical = critical

        #: The way this particular asset is being used in this installation. For example, the application of a bushing when attached to a specific transformer winding would be one of the following: H1, H2, H3, H0, X1, X2, X3, X0, Y1, Y2, Y3, Y0.
        self.application = application

        #: Purchase price of asset.
        self.purchasePrice = purchasePrice

        #: Whenever an asset is reconditioned, percentage of expected life for the asset when it was new; zero for new devices.
        self.initialLossOfLife = initialLossOfLife

        #: Date this asset was manufactured.
        self.manufacturedDate = manufacturedDate

        #: (if applicable) Date current installation was completed, which may not be the same as the in-service date. Asset may have been installed at other locations previously. Ignored if asset is (1) not currently installed (e.g., stored in a depot) or (2) not intended to be installed (e.g., vehicle, tool).
        self.installationDate = installationDate

        #: Lot number for this asset. Even for the same model and version number, many assets are manufactured in lots.
        self.lotNumber = lotNumber

        # Relationship ends are initialised through their public property
        # setters (self.X = ...) so that reverse references on the peer
        # objects are maintained; the private slot is created first.
        self._DocumentRoles = []
        self.DocumentRoles = [] if DocumentRoles is None else DocumentRoles

        self._Mediums = []
        self.Mediums = [] if Mediums is None else Mediums

        self._ErpRecDeliveryItems = []
        self.ErpRecDeliveryItems = [] if ErpRecDeliveryItems is None else ErpRecDeliveryItems

        self._Ratings = []
        self.Ratings = [] if Ratings is None else Ratings

        self._ToAssetRoles = []
        self.ToAssetRoles = [] if ToAssetRoles is None else ToAssetRoles

        self.electronicAddress = electronicAddress

        self._FromAssetRoles = []
        self.FromAssetRoles = [] if FromAssetRoles is None else FromAssetRoles

        self._AssetFunctions = []
        self.AssetFunctions = [] if AssetFunctions is None else AssetFunctions

        self._ErpItemMaster = None
        self.ErpItemMaster = ErpItemMaster

        self._ErpInventory = None
        self.ErpInventory = ErpInventory

        self._ReliabilityInfos = []
        self.ReliabilityInfos = [] if ReliabilityInfos is None else ReliabilityInfos

        self._AssetInfo = None
        self.AssetInfo = AssetInfo

        self._WorkTask = None
        self.WorkTask = WorkTask

        self.acceptanceTest = acceptanceTest

        self._ErpOrganisationRoles = []
        self.ErpOrganisationRoles = [] if ErpOrganisationRoles is None else ErpOrganisationRoles

        self._ScheduledEvents = []
        self.ScheduledEvents = [] if ScheduledEvents is None else ScheduledEvents

        self._AssetContainer = None
        self.AssetContainer = AssetContainer

        self._AssetPropertyCurves = []
        self.AssetPropertyCurves = [] if AssetPropertyCurves is None else AssetPropertyCurves

        self._ChangeItems = []
        self.ChangeItems = [] if ChangeItems is None else ChangeItems

        self._ActivityRecords = []
        self.ActivityRecords = [] if ActivityRecords is None else ActivityRecords

        self._Location = None
        self.Location = Location

        self.status = status

        self._PowerSystemResources = []
        self.PowerSystemResources = [] if PowerSystemResources is None else PowerSystemResources

        self._FinancialInfo = None
        self.FinancialInfo = FinancialInfo

        self._Properties = []
        self.Properties = [] if Properties is None else Properties

        self._Measurements = []
        self.Measurements = [] if Measurements is None else Measurements

        super(Asset, self).__init__(*args, **kw_args)

    # Metadata describing the class' attributes and references; presumably
    # consumed by the CIM (de)serialisation machinery — confirm against the
    # PyCIM writer before changing.
    _attrs = ["corporateCode", "utcNumber", "initialCondition", "category", "serialNumber", "critical", "application", "purchasePrice", "initialLossOfLife", "manufacturedDate", "installationDate", "lotNumber"]
    _attr_types = {"corporateCode": str, "utcNumber": str, "initialCondition": str, "category": str, "serialNumber": str, "critical": bool, "application": str, "purchasePrice": float, "initialLossOfLife": float, "manufacturedDate": str, "installationDate": str, "lotNumber": str}
    _defaults = {"corporateCode": '', "utcNumber": '', "initialCondition": '', "category": '', "serialNumber": '', "critical": False, "application": '', "purchasePrice": 0.0, "initialLossOfLife": 0.0, "manufacturedDate": '', "installationDate": '', "lotNumber": ''}
    _enums = {}
    _refs = ["DocumentRoles", "Mediums", "ErpRecDeliveryItems", "Ratings", "ToAssetRoles", "electronicAddress", "FromAssetRoles", "AssetFunctions", "ErpItemMaster", "ErpInventory", "ReliabilityInfos", "AssetInfo", "WorkTask", "acceptanceTest", "ErpOrganisationRoles", "ScheduledEvents", "AssetContainer", "AssetPropertyCurves", "ChangeItems", "ActivityRecords", "Location", "status", "PowerSystemResources", "FinancialInfo", "Properties", "Measurements"]
    _many_refs = ["DocumentRoles", "Mediums", "ErpRecDeliveryItems", "Ratings", "ToAssetRoles", "FromAssetRoles", "AssetFunctions", "ReliabilityInfos", "ErpOrganisationRoles", "ScheduledEvents", "AssetPropertyCurves", "ChangeItems", "ActivityRecords", "PowerSystemResources", "Properties", "Measurements"]
def getDocumentRoles(self):
    return self._DocumentRoles

def setDocumentRoles(self, value):
    # One-to-many link: detach the old roles through their public Asset
    # setter, then attach the new ones via the private back-reference.
    # Statement order matters — detach before rebinding.
    for x in self._DocumentRoles:
        x.Asset = None
    for y in value:
        y._Asset = self
    self._DocumentRoles = value

DocumentRoles = property(getDocumentRoles, setDocumentRoles)

def addDocumentRoles(self, *DocumentRoles):
    # Attaching through the role's public setter also appends to our list.
    for obj in DocumentRoles:
        obj.Asset = self

def removeDocumentRoles(self, *DocumentRoles):
    for obj in DocumentRoles:
        obj.Asset = None
def getMediums(self):
    return self._Mediums

def setMediums(self, value):
    """Rebind the many-to-many Mediums link, keeping reverse lists in sync.

    Fix: the filtered reverse list must be assigned back onto the peer
    ``p``; the original assigned it to ``self._Mediums._Assets`` — an
    attribute of a plain list — which raised AttributeError whenever the
    link was already populated.
    """
    for p in self._Mediums:
        p._Assets = [q for q in p.Assets if q != self]
    for r in value:
        if self not in r._Assets:
            r._Assets.append(self)
    self._Mediums = value

Mediums = property(getMediums, setMediums)

def addMediums(self, *Mediums):
    for obj in Mediums:
        if self not in obj._Assets:
            obj._Assets.append(self)
        self._Mediums.append(obj)

def removeMediums(self, *Mediums):
    for obj in Mediums:
        if self in obj._Assets:
            obj._Assets.remove(self)
        self._Mediums.remove(obj)

def getErpRecDeliveryItems(self):
    return self._ErpRecDeliveryItems

def setErpRecDeliveryItems(self, value):
    """Rebind the many-to-many ErpRecDeliveryItems link (same fix as
    setMediums: update the peer's reverse list, not the local list object).
    """
    for p in self._ErpRecDeliveryItems:
        p._Assets = [q for q in p.Assets if q != self]
    for r in value:
        if self not in r._Assets:
            r._Assets.append(self)
    self._ErpRecDeliveryItems = value

ErpRecDeliveryItems = property(getErpRecDeliveryItems, setErpRecDeliveryItems)

def addErpRecDeliveryItems(self, *ErpRecDeliveryItems):
    for obj in ErpRecDeliveryItems:
        if self not in obj._Assets:
            obj._Assets.append(self)
        self._ErpRecDeliveryItems.append(obj)

def removeErpRecDeliveryItems(self, *ErpRecDeliveryItems):
    for obj in ErpRecDeliveryItems:
        if self in obj._Assets:
            obj._Assets.remove(self)
        self._ErpRecDeliveryItems.remove(obj)

def getRatings(self):
    """UserAttributes used to specify ratings of this asset. Ratings also can be used to set the initial value of operational measurement limits. Use 'name' to specify what kind of rating it is (e.g., voltage, current), and 'value' attribute for the actual value and unit information of the rating.
    """
    return self._Ratings

def setRatings(self, value):
    """Rebind the many-to-many Ratings link (same fix as setMediums,
    against the peer's _RatingAssets reverse list).
    """
    for p in self._Ratings:
        p._RatingAssets = [q for q in p.RatingAssets if q != self]
    for r in value:
        if self not in r._RatingAssets:
            r._RatingAssets.append(self)
    self._Ratings = value

Ratings = property(getRatings, setRatings)

def addRatings(self, *Ratings):
    for obj in Ratings:
        if self not in obj._RatingAssets:
            obj._RatingAssets.append(self)
        self._Ratings.append(obj)

def removeRatings(self, *Ratings):
    for obj in Ratings:
        if self in obj._RatingAssets:
            obj._RatingAssets.remove(self)
        self._Ratings.remove(obj)
def getToAssetRoles(self):
    return self._ToAssetRoles

def setToAssetRoles(self, value):
    # One-to-many: detach old roles via their public FromAsset setter,
    # attach new ones directly on the private back-reference.
    for x in self._ToAssetRoles:
        x.FromAsset = None
    for y in value:
        y._FromAsset = self
    self._ToAssetRoles = value

ToAssetRoles = property(getToAssetRoles, setToAssetRoles)

def addToAssetRoles(self, *ToAssetRoles):
    for obj in ToAssetRoles:
        obj.FromAsset = self

def removeToAssetRoles(self, *ToAssetRoles):
    for obj in ToAssetRoles:
        obj.FromAsset = None

# Electronic address.
electronicAddress = None

def getFromAssetRoles(self):
    return self._FromAssetRoles

def setFromAssetRoles(self, value):
    # Mirror of ToAssetRoles on the opposite end of the role.
    for x in self._FromAssetRoles:
        x.ToAsset = None
    for y in value:
        y._ToAsset = self
    self._FromAssetRoles = value

FromAssetRoles = property(getFromAssetRoles, setFromAssetRoles)

def addFromAssetRoles(self, *FromAssetRoles):
    for obj in FromAssetRoles:
        obj.ToAsset = self

def removeFromAssetRoles(self, *FromAssetRoles):
    for obj in FromAssetRoles:
        obj.ToAsset = None

def getAssetFunctions(self):
    return self._AssetFunctions

def setAssetFunctions(self, value):
    # One-to-many: same detach/attach pattern as DocumentRoles.
    for x in self._AssetFunctions:
        x.Asset = None
    for y in value:
        y._Asset = self
    self._AssetFunctions = value

AssetFunctions = property(getAssetFunctions, setAssetFunctions)

def addAssetFunctions(self, *AssetFunctions):
    for obj in AssetFunctions:
        obj.Asset = self

def removeAssetFunctions(self, *AssetFunctions):
    for obj in AssetFunctions:
        obj.Asset = None

def getErpItemMaster(self):
    return self._ErpItemMaster

def setErpItemMaster(self, value):
    # One-to-one: clear the old reverse link first, then attach the new
    # peer. Setting .Asset = None before ._Asset = self detaches the
    # peer's previous owner through its public setter.
    if self._ErpItemMaster is not None:
        self._ErpItemMaster._Asset = None

    self._ErpItemMaster = value
    if self._ErpItemMaster is not None:
        self._ErpItemMaster.Asset = None
        self._ErpItemMaster._Asset = self

ErpItemMaster = property(getErpItemMaster, setErpItemMaster)

def getErpInventory(self):
    return self._ErpInventory

def setErpInventory(self, value):
    # One-to-one: same pattern as setErpItemMaster.
    if self._ErpInventory is not None:
        self._ErpInventory._Asset = None

    self._ErpInventory = value
    if self._ErpInventory is not None:
        self._ErpInventory.Asset = None
        self._ErpInventory._Asset = self

ErpInventory = property(getErpInventory, setErpInventory)
def getReliabilityInfos(self):
    return self._ReliabilityInfos

def setReliabilityInfos(self, value):
    """Rebind the many-to-many ReliabilityInfos link.

    Fix: assign the filtered reverse list onto the peer ``p``; the original
    assigned it to ``self._ReliabilityInfos._Assets`` (an attribute of a
    plain list), raising AttributeError once the link held any peers.
    """
    for p in self._ReliabilityInfos:
        p._Assets = [q for q in p.Assets if q != self]
    for r in value:
        if self not in r._Assets:
            r._Assets.append(self)
    self._ReliabilityInfos = value

ReliabilityInfos = property(getReliabilityInfos, setReliabilityInfos)

def addReliabilityInfos(self, *ReliabilityInfos):
    for obj in ReliabilityInfos:
        if self not in obj._Assets:
            obj._Assets.append(self)
        self._ReliabilityInfos.append(obj)

def removeReliabilityInfos(self, *ReliabilityInfos):
    for obj in ReliabilityInfos:
        if self in obj._Assets:
            obj._Assets.remove(self)
        self._ReliabilityInfos.remove(obj)
def getAssetInfo(self):
    """Data applicable to this asset.
    """
    return self._AssetInfo

def setAssetInfo(self, value):
    # NOTE(review): the reverse link is stored as a scalar (_Assets =
    # self / None) although the attribute name is plural — generated
    # code; confirm against the AssetInfo class before changing.
    if self._AssetInfo is not None:
        self._AssetInfo._Assets = None

    self._AssetInfo = value
    if self._AssetInfo is not None:
        self._AssetInfo.Assets = None
        self._AssetInfo._Assets = self

AssetInfo = property(getAssetInfo, setAssetInfo)

def getWorkTask(self):
    return self._WorkTask

def setWorkTask(self, value):
    # Many-to-one: remove self from the old task's Assets list, then
    # append self to the new task's list.
    if self._WorkTask is not None:
        filtered = [x for x in self.WorkTask.Assets if x != self]
        self._WorkTask._Assets = filtered

    self._WorkTask = value
    if self._WorkTask is not None:
        if self not in self._WorkTask._Assets:
            self._WorkTask._Assets.append(self)

WorkTask = property(getWorkTask, setWorkTask)

# Information on acceptance test.
acceptanceTest = None

def getErpOrganisationRoles(self):
    return self._ErpOrganisationRoles

def setErpOrganisationRoles(self, value):
    # One-to-many: detach old roles, attach new via back-references.
    for x in self._ErpOrganisationRoles:
        x.Asset = None
    for y in value:
        y._Asset = self
    self._ErpOrganisationRoles = value

ErpOrganisationRoles = property(getErpOrganisationRoles, setErpOrganisationRoles)

def addErpOrganisationRoles(self, *ErpOrganisationRoles):
    for obj in ErpOrganisationRoles:
        obj.Asset = self

def removeErpOrganisationRoles(self, *ErpOrganisationRoles):
    for obj in ErpOrganisationRoles:
        obj.Asset = None
def getScheduledEvents(self):
    return self._ScheduledEvents

def setScheduledEvents(self, value):
    """Rebind the many-to-many ScheduledEvents link.

    Fix: the filtered reverse list belongs on the peer ``p``; the original
    assigned it to ``self._ScheduledEvents._Assets`` — an attribute of a
    plain list — raising AttributeError for a populated link.
    """
    for p in self._ScheduledEvents:
        p._Assets = [q for q in p.Assets if q != self]
    for r in value:
        if self not in r._Assets:
            r._Assets.append(self)
    self._ScheduledEvents = value

ScheduledEvents = property(getScheduledEvents, setScheduledEvents)

def addScheduledEvents(self, *ScheduledEvents):
    for obj in ScheduledEvents:
        if self not in obj._Assets:
            obj._Assets.append(self)
        self._ScheduledEvents.append(obj)

def removeScheduledEvents(self, *ScheduledEvents):
    for obj in ScheduledEvents:
        if self in obj._Assets:
            obj._Assets.remove(self)
        self._ScheduledEvents.remove(obj)
def getAssetContainer(self):
    return self._AssetContainer

def setAssetContainer(self, value):
    # Many-to-one: drop self from the previous container's Assets list,
    # then append self to the new container's list.
    if self._AssetContainer is not None:
        filtered = [x for x in self.AssetContainer.Assets if x != self]
        self._AssetContainer._Assets = filtered

    self._AssetContainer = value
    if self._AssetContainer is not None:
        if self not in self._AssetContainer._Assets:
            self._AssetContainer._Assets.append(self)

AssetContainer = property(getAssetContainer, setAssetContainer)
def getAssetPropertyCurves(self):
    return self._AssetPropertyCurves

def setAssetPropertyCurves(self, value):
    """Rebind the many-to-many AssetPropertyCurves link.

    Fix: assign the filtered reverse list onto the peer ``p`` instead of
    ``self._AssetPropertyCurves._Assets`` (a list has no such attribute).
    """
    for p in self._AssetPropertyCurves:
        p._Assets = [q for q in p.Assets if q != self]
    for r in value:
        if self not in r._Assets:
            r._Assets.append(self)
    self._AssetPropertyCurves = value

AssetPropertyCurves = property(getAssetPropertyCurves, setAssetPropertyCurves)

def addAssetPropertyCurves(self, *AssetPropertyCurves):
    for obj in AssetPropertyCurves:
        if self not in obj._Assets:
            obj._Assets.append(self)
        self._AssetPropertyCurves.append(obj)

def removeAssetPropertyCurves(self, *AssetPropertyCurves):
    for obj in AssetPropertyCurves:
        if self in obj._Assets:
            obj._Assets.remove(self)
        self._AssetPropertyCurves.remove(obj)
def getChangeItems(self):
    return self._ChangeItems

def setChangeItems(self, value):
    # One-to-many: detach old items via their public Asset setter, then
    # attach the new ones on the private back-reference.
    for x in self._ChangeItems:
        x.Asset = None
    for y in value:
        y._Asset = self
    self._ChangeItems = value

ChangeItems = property(getChangeItems, setChangeItems)

def addChangeItems(self, *ChangeItems):
    for obj in ChangeItems:
        obj.Asset = self

def removeChangeItems(self, *ChangeItems):
    for obj in ChangeItems:
        obj.Asset = None
def getActivityRecords(self):
    """All activity records created for this asset.
    """
    return self._ActivityRecords

def setActivityRecords(self, value):
    """Rebind the many-to-many ActivityRecords link.

    Fix: the filtered reverse list goes back onto the peer ``p``; the
    original assigned it to ``self._ActivityRecords._Assets`` (an
    attribute of a plain list), raising AttributeError.
    """
    for p in self._ActivityRecords:
        p._Assets = [q for q in p.Assets if q != self]
    for r in value:
        if self not in r._Assets:
            r._Assets.append(self)
    self._ActivityRecords = value

ActivityRecords = property(getActivityRecords, setActivityRecords)

def addActivityRecords(self, *ActivityRecords):
    for obj in ActivityRecords:
        if self not in obj._Assets:
            obj._Assets.append(self)
        self._ActivityRecords.append(obj)

def removeActivityRecords(self, *ActivityRecords):
    for obj in ActivityRecords:
        if self in obj._Assets:
            obj._Assets.remove(self)
        self._ActivityRecords.remove(obj)
def getLocation(self):
    """Location of this asset.
    """
    return self._Location

def setLocation(self, value):
    # Many-to-one: maintain the Location.Assets reverse list — remove
    # self from the old location, append to the new one.
    if self._Location is not None:
        filtered = [x for x in self.Location.Assets if x != self]
        self._Location._Assets = filtered

    self._Location = value
    if self._Location is not None:
        if self not in self._Location._Assets:
            self._Location._Assets.append(self)

Location = property(getLocation, setLocation)

# Status of this asset.
status = None
def getPowerSystemResources(self):
    """All power system resources used to electrically model this asset. For example, transformer asset is electrically modelled with a transformer and its windings and tap changer.
    """
    return self._PowerSystemResources

def setPowerSystemResources(self, value):
    """Rebind the many-to-many PowerSystemResources link.

    Fix: assign the filtered reverse list onto the peer ``p`` instead of
    ``self._PowerSystemResources._Assets`` (a list attribute that does
    not exist, so the original raised AttributeError).
    """
    for p in self._PowerSystemResources:
        p._Assets = [q for q in p.Assets if q != self]
    for r in value:
        if self not in r._Assets:
            r._Assets.append(self)
    self._PowerSystemResources = value

PowerSystemResources = property(getPowerSystemResources, setPowerSystemResources)

def addPowerSystemResources(self, *PowerSystemResources):
    for obj in PowerSystemResources:
        if self not in obj._Assets:
            obj._Assets.append(self)
        self._PowerSystemResources.append(obj)

def removePowerSystemResources(self, *PowerSystemResources):
    for obj in PowerSystemResources:
        if self in obj._Assets:
            obj._Assets.remove(self)
        self._PowerSystemResources.remove(obj)
def getFinancialInfo(self):
    return self._FinancialInfo

def setFinancialInfo(self, value):
    # One-to-one: detach the old reverse link, then attach the new peer
    # (public setter first to release any previous owner).
    if self._FinancialInfo is not None:
        self._FinancialInfo._Asset = None

    self._FinancialInfo = value
    if self._FinancialInfo is not None:
        self._FinancialInfo.Asset = None
        self._FinancialInfo._Asset = self

FinancialInfo = property(getFinancialInfo, setFinancialInfo)
def getProperties(self):
    """UserAttributes used to specify further properties of this asset. Use 'name' to specify what kind of property it is, and 'value.value' attribute for the actual value.
    """
    return self._Properties

def setProperties(self, value):
    """Rebind the many-to-many Properties link.

    Fix: the filtered reverse list belongs on the peer ``p``; the original
    assigned it to ``self._Properties._PropertyAssets`` — an attribute of
    a plain list — raising AttributeError for a populated link.
    """
    for p in self._Properties:
        p._PropertyAssets = [q for q in p.PropertyAssets if q != self]
    for r in value:
        if self not in r._PropertyAssets:
            r._PropertyAssets.append(self)
    self._Properties = value

Properties = property(getProperties, setProperties)

def addProperties(self, *Properties):
    for obj in Properties:
        if self not in obj._PropertyAssets:
            obj._PropertyAssets.append(self)
        self._Properties.append(obj)

def removeProperties(self, *Properties):
    for obj in Properties:
        if self in obj._PropertyAssets:
            obj._PropertyAssets.remove(self)
        self._Properties.remove(obj)
def getMeasurements(self):
    return self._Measurements

def setMeasurements(self, value):
    # One-to-many: detach old measurements via their public Asset setter,
    # attach the new ones on the private back-reference.
    for x in self._Measurements:
        x.Asset = None
    for y in value:
        y._Asset = self
    self._Measurements = value

Measurements = property(getMeasurements, setMeasurements)

def addMeasurements(self, *Measurements):
    for obj in Measurements:
        obj.Asset = self

def removeMeasurements(self, *Measurements):
    for obj in Measurements:
        obj.Asset = None
|
|
"""
# Support Code
## Standard Headers
"""
from __future__ import division,print_function
import sys,random,math
sys.dont_write_bytecode = True
from settingsWhere import *
"""
## Simple, low-level stuff
### Maths Stuff
"""
def gt(x, y):
    """True when x is strictly greater than y."""
    return x > y


def lt(x, y):
    """True when x is strictly less than y."""
    return x < y
def medianIQR(lst, ordered=False):
if not ordered:
lst = sorted(lst)
n = len(lst)
q = n//4
iqr = lst[q*3] - lst[q]
if n % 2:
return lst[q*2],iqr
else:
p = max(0,q-1)
return (lst[p] + lst[q]) * 0.5,iqr
def median(lst, ordered=False):
    "Convenience wrapper: just the median half of medianIQR."
    med, _ = medianIQR(lst, ordered)
    return med
"""
An accumulator for reporting on numbers.
"""
class N():
    "Add/delete counts of numbers, tracking mean and m2 incrementally (Welford-style)."
    def __init__(i, inits=[]):  # `inits` is only read, never mutated
        """Start from zero, then add every seed value.

        NOTE(fix): this used map(i.__iadd__, inits), which is a lazy
        iterator on Python 3 and therefore added nothing; an explicit
        loop always runs.
        """
        i.zero()
        for x in inits:
            i.__iadd__(x)
    def zero(i):
        "Reset count, mean, second moment and the sample cache."
        i.n = i.mu = i.m2 = 0
        i.cache = Cache()
    def sd(i):
        "Sample standard deviation (0 when fewer than two numbers seen)."
        if i.n < 2:
            return 0
        else:
            return (max(0, i.m2) / (i.n - 1)) ** 0.5
    def __iadd__(i, x):
        "Add x, updating n/mu/m2 in one pass."
        i.cache += x
        i.n += 1
        delta = x - i.mu
        i.mu += delta / (1.0 * i.n)
        i.m2 += delta * (x - i.mu)
        return i
    def __isub__(i, x):
        """Remove x, approximately reversing __iadd__.

        The cache cannot un-sample a value, so it is rebuilt empty.
        NOTE(fix): on underflow this used to `return i.zero()`, which is
        None and clobbered the caller's variable (`a -= x` left a == None);
        it now resets and returns i.
        """
        i.cache = Cache()
        if i.n < 2:
            i.zero()
            return i
        i.n -= 1
        delta = x - i.mu
        i.mu -= delta / (1.0 * i.n)
        i.m2 -= delta * (x - i.mu)
        return i
class Cache:
    "Keep a bounded random sample of the numbers seen so far."
    def __init__(i, inits=[]):  # `inits` is only read, never mutated
        i.all, i.n, i._has = [], 0, None
        # NOTE(fix): was map(i.__iadd__, inits) — lazy on Python 3, so the
        # seed values were never stored; an explicit loop always runs.
        for x in inits:
            i.__iadd__(x)
    def __iadd__(i, x):
        "Keep x while below capacity; afterwards, maybe replace a random old item."
        i.n += 1
        if len(i.all) < 128:  # if not full
            i._has = None     # summary is now stale
            i.all += [x]      # then add
        else:  # otherwise, maybe replace an old item
            # NOTE(review): capacity is hard-coded as 128 above but
            # The.cache.size is used here — presumably they are meant to
            # agree; confirm against settingsWhere.
            if random.random() <= The.cache.size / i.n:
                i._has = None
                i.all[int(random.random() * The.cache.size)] = x
        return i
    def has(i):
        "Lazily compute and memoize {median, iqr, lo, hi} of the sample."
        if i._has is None:
            lst = sorted(i.all)
            # NOTE(fix): the summary previously read i.all (unsorted) for the
            # median/iqr/lo/hi even though the sorted copy `lst` was at hand,
            # so every statistic could be wrong; use the sorted list throughout.
            med, iqr = medianIQR(lst, ordered=True)
            i._has = o(
                median=med, iqr=iqr,
                lo=lst[0], hi=lst[-1])
        return i._has
"""
### Random stuff.
"""
def by(x):
    "A number drawn uniformly at random from [0, x]."
    return random.uniform(0, x)
# Short aliases for the common random operations.
rseed = random.seed
any = random.choice  # NOTE: deliberately shadows the builtin `any` (legacy style)
rand = random.random
def seed(r=None):
    """Re-seed the random number generator.

    With no argument, uses The.seed (initialising the global `The`
    from defaults() first if it is still None).
    """
    global The
    if The is None:
        The = defaults()
    if r is None:
        r = The.seed
    rseed(r)
"""
### List Handling Tricks
"""
def first(lst):
    "Item 0 of a sequence."
    return lst[0]
def second(lst):
    "Item 1 of a sequence."
    return lst[1]
def third(lst):
    "Item 2 of a sequence."
    return lst[2]
"""
### Printing Stuff
Print without newline:
"""
def say(*lst):
    "Print the arguments space-separated, without a trailing newline."
    print(*lst, end="")
"""
Print a list of numbers without an excess of decimal places:
"""
def gs(lst):
    "Apply g() to every item of lst."
    return list(map(g, lst))
def g(x):
    """Format x with '%g' precision: an int when x is a whole number,
    otherwise a float rounded to '%g' significant digits."""
    shown = '%g' % x
    if x == int(x):
        return int(shown)
    return float(shown)
"""
Pretty print a dictionary:
"""
def showd(d):
    """Pretty-print a dict as ':key value' pairs in sorted key order,
    skipping any key that contains an underscore."""
    def fmt(k, v):
        if isinstance(v, list):
            v = gs(v)
        if isinstance(v, float):
            return ":%s %g" % (k, v)
        return ":%s %s" % (k, v)
    parts = []
    for k, v in sorted(d.items()):
        if "_" not in k:
            parts.append(fmt(k, v))
    return ' '.join(parts)
"""
## Decorator to run code at Start-up
"""
def go(f):
    """A decorator that runs `f` at load time, printing a banner first.

    NOTE(fix): this previously returned None, so `@go` rebound the decorated
    name to None and the function became unusable afterwards; it now returns
    f, which is backward-compatible for the run-at-load use case.
    """
    print("\n# ---|", f.__name__, "|-----------------")
    if f.__doc__:
        print("#", f.__doc__)
    f()
    return f
"""
## Handling command line options.
Convert command line to a function call.
e.g. if the file lib.py ends with
if __name__ == '__main__':eval(todo())
then
python lib.py myfun :a 1 :b fred
results in a call to _myfun(a=1,b='fred')_.
"""
def todo(com="print(The._logo,'WHERE (2.0) you at?')"):
    """Convert the command line into a single function-call string.

    `python lib.py myfun :a 1 :b fred` yields "myfun(**dict(a=1,b='fred'))".
    With no arguments, returns the default `com` string unchanged.

    SECURITY NOTE: argument values go through eval() and the returned string
    is itself meant to be eval()'d — only use on trusted command lines.

    NOTE(fix): the string check used `basestring`, which no longer exists on
    Python 3 and raised NameError there; it now falls back to `str`.
    """
    import sys
    if len(sys.argv) < 2:
        return com
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str         # Python 3
    def strp(x):
        return isinstance(x, string_types)
    def wrap(x):
        return "'%s'" % x if strp(x) else str(x)
    def oneTwo(lst):
        # consume (flag, value) pairs from the front of the list
        while lst:
            yield lst.pop(0), lst.pop(0)
    def value(x):
        try:
            return eval(x)
        except Exception:  # unparseable -> keep the raw string
            return x
    def two(flag, val):
        return flag[1:] + "=" + wrap(value(val))
    twos = [two(x, y) for x, y in oneTwo(sys.argv[2:])]
    return sys.argv[1] + '(**dict(' + ','.join(twos) + '))'
"""
## More interesting, low-level stuff
"""
def timing(f, repeats=10):
    """Mean wall-clock seconds per call of `f`, averaged over `repeats` calls.

    NOTE(fix): time.clock() was removed in Python 3.8; prefer
    time.perf_counter when available, falling back to clock on Python 2.
    """
    import time
    clock = getattr(time, 'perf_counter', None) or time.clock
    time1 = clock()
    for _ in range(repeats):
        f()
    return (clock() - time1) * 1.0 / repeats
"""
## Data Completion Tool
Fills in some details on a table of data. For example,
def nasa93():
vl=1;l=2;n=3;h=4;vh=5;xh=6
return data(indep= [
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse',
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
'ltex', 'tool', 'site', 'sced', 'kloc'],
less = ['effort', 'defects', 'months'],
_rows=[
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
...
Adds in information on _cols_, _decisions_, _hi,lo_, etc:
{ :cols [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 22, 23, 24]
:decisions [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22]
:eval <function <lambda> at 0x7f3f825bea28>
:hi {0: 4, 1: 4, 2: 4, 3: 5, 4: 4, 5: 5, 6: 5,
7: 6, 8: 3, 9: 3, 10: 6, 11: 6, 12: 4, 13: 5,
14: 5, 15: 3, 16: 5, 17: 4, 18: 4, 19: 4,
20: 3, 21: 3, 22: 980, 23: 8211, 24: 50961}
:lo {0: 4, 1: 4, 2: 4, 3: 5, 4: 2, 5: 2, 6: 2,
7: 2, 8: 3, 9: 3, 10: 3, 11: 3, 12: 2,
13: 3, 14: 3, 15: 3, 16: 2, 17: 1, 18: 1,
19: 3, 20: 3, 21: 2, 22: 0.9, 23: 8.4, 24: 28}
:names ['Prec', 'Flex', 'Resl', 'Team', 'Pmat',
'rely', 'data', 'cplx', 'ruse', 'docu',
'time', 'stor', 'pvol', 'acap', 'pcap',
'pcon', 'aexp', 'plex', 'ltex', 'tool',
'site', 'sced', 'kloc', 'effort',
'defects', 'months']
:objectives [22, 23, 24]
:w {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1,
7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1,
14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1,
20: 1, 21: 1, 22: -1, 23: -1, 24: -1}
}
Code:
"""
def data(indep=[], less=[], more=[], _rows=[]):
    """Fill in derived details (cols, weights, lo/hi, ...) for a table.

    - indep: names of the decision (independent) columns
    - less : names of objectives to minimise (weight -1)
    - more : names of objectives to maximise (weight +1)
    - _rows: raw rows of cell values
    (The mutable defaults are safe here: they are only read, never mutated.)
    """
    nindep = len(indep)
    ndep = len(less) + len(more)
    m = o(lo={}, hi={}, w={},
          eval=lambda m, it: True,
          _rows=[o(cells=r, score=0, scored=False,
                   x0=None, y0=None)
                 for r in _rows],
          names=indep + less + more)
    m.decisions = [x for x in range(nindep)]
    # NOTE(review): objective column indices start at nindep-1, overlapping
    # the last decision column — this matches the documented example output
    # above, so it is preserved; confirm whether the off-by-one is intended.
    m.objectives = [nindep + x - 1 for x in range(ndep)]
    m.cols = m.decisions + m.objectives
    for x in m.decisions:
        m.w[x] = 1
    # Weight the objectives by their actual column index: the first
    # len(less) are minimised (-1), the rest maximised (+1).
    # NOTE(fix): the original re-used stale loop variables here, which
    # raised NameError when `less` was empty and mis-indexed the `more`
    # weights when both lists were non-empty.
    for j, col in enumerate(m.objectives):
        m.w[col] = -1 if j < len(less) else 1
    for col in m.cols:
        seen = sorted(row.cells[col] for row in m._rows)  # avoid shadowing builtin `all`
        m.lo[col] = seen[0]
        m.hi[col] = seen[-1]
    return m
"""
## Start-up Actions
"""
# Script entry point: build a call string from the command line and evaluate it.
# NOTE(review): eval of command-line input — only safe for trusted invocations.
if __name__ == '__main__': eval(todo())
|
|
from time import sleep
from typing import TYPE_CHECKING, Any, Optional
from upcloud_api.firewall import FirewallRule
from upcloud_api.ip_address import IPAddress
from upcloud_api.storage import Storage
from upcloud_api.utils import try_it_n_times
if TYPE_CHECKING:
from upcloud_api import CloudManager
def login_user_block(username, ssh_keys, create_password=False):
    """
    Build a Server.login_user block for the UpCloud create-server API.

    (see: https://www.upcloud.com/api/8-servers/#create-server)
    """
    payload = dict(
        create_password='yes' if create_password else 'no',
        ssh_keys={'ssh_key': ssh_keys},
    )
    # username is optional; the API default applies when it is omitted
    if username:
        payload['username'] = username
    return payload
# TODO: should this inherit from UpcloudResource too?
class Server:
    """
    Class representation of UpCloud Server instance.
    Partially immutable class; only fields that are persisted with the `.save()` method may be set
    with the server.field=value syntax. See __setattr__ override.
    """
    cloud_manager: 'CloudManager'
    #
    # Functionality for partial immutability and repopulating the object from API.
    #
    # Fields that may be assigned directly (see __setattr__) and persisted via .save().
    updateable_fields = [
        'boot_order',
        'core_number',
        'firewall',
        'hostname',
        'memory_amount',
        'nic_model',
        'title',
        'timezone',
        'video_model',
        'vnc',
        'vnc_password',
        'plan',
    ]
    # Fields included in the create-server POST body only when present on the instance.
    optional_fields = [
        'plan',
        'core_number',
        'memory_amount',
        'boot_order',
        'firewall',
        'nic_model',
        'timezone',
        'video_model',
        'vnc_password',
        'password_delivery',
        'avoid_host',
        'login_user',
        'user_data',
    ]
    def __init__(self, server=None, **kwargs) -> None:
        """
        Initialize Server.
        Use _reset to set attributes.
        Set title = hostname if title not given.
        """
        # object.__setattr__ bypasses the readonly guard in __setattr__ below.
        object.__setattr__(self, 'populated', False)
        self._reset(server, **kwargs)
        if not hasattr(self, 'title') and hasattr(self, 'hostname'):
            self.title = self.hostname
    def __setattr__(self, name: str, value: Any) -> None:
        """
        Override to prevent updating readonly fields.

        Internal code writes readonly attributes via object.__setattr__ directly.
        """
        if name not in self.updateable_fields:
            raise Exception(f"'{name}' is a readonly field")
        else:
            object.__setattr__(self, name, value)
    def _reset(self, server, **kwargs) -> None:
        """
        Reset the server object with new values given as params.
        - server: a dict representing the server. e.g the API response.
        - kwargs: any meta fields such as cloud_manager and populated.
        Note: storage_devices and ip_addresses may be given in server as dicts or
        in kwargs as lists containing Storage and IPAddress objects.
        """
        if server:
            # handle storage, ip_address dicts and tags if they exist
            Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
            for key in server:
                object.__setattr__(self, key, server[key])
        for key in kwargs:
            object.__setattr__(self, key, kwargs[key])
    def populate(self) -> 'Server':
        """
        Sync changes from the API to the local object.
        Note: syncs ip_addresses and storage_devices too (/server/uuid endpoint)
        """
        server, IPAddresses, storages = self.cloud_manager.get_server_data(self.uuid)
        self._reset(server, ip_addresses=IPAddresses, storage_devices=storages, populated=True)
        return self
    def __str__(self) -> str:
        """The server's UUID is its string representation."""
        return self.uuid
    #
    # Main functionality, 1:1 with UpCloud's API
    #
    def save(self) -> None:
        """
        Sync local changes in server's attributes to the API.
        Note: DOES NOT sync IPAddresses and storage_devices,
        use add_ip, add_storage, remove_ip, remove_storage instead.
        """
        # dict comprehension that also works with 2.6
        # http://stackoverflow.com/questions/21069668/alternative-to-dict-comprehension-prior-to-python-2-7
        kwargs = {
            field: getattr(self, field) for field in self.updateable_fields if hasattr(self, field)
        }
        self.cloud_manager.modify_server(self.uuid, **kwargs)
        # re-apply the persisted fields locally (kwargs acts as the `server` dict)
        self._reset(kwargs)
    def destroy(self):
        """
        Destroy the server.
        """
        self.cloud_manager.delete_server(self.uuid)
    def shutdown(self, hard: bool = False, timeout: int = 30) -> None:
        """
        Shutdown/stop the server. By default, issue a soft shutdown with a timeout of 30s.
        After the timeout a hard shutdown is performed if the server has not stopped.
        Note: API responds immediately (unlike in start), with state: started.
        This client will, however, set state as 'maintenance' to signal that the server is neither
        started nor stopped.
        """
        body = dict()
        body['stop_server'] = {'stop_type': 'hard' if hard else 'soft', 'timeout': f'{timeout}'}
        path = f'/server/{self.uuid}/stop'
        self.cloud_manager.api.post_request(path, body)
        # 'state' is readonly, so bypass __setattr__
        object.__setattr__(self, 'state', 'maintenance')
    def stop(self) -> None:
        """
        Alias for shutdown.
        """
        self.shutdown()
    def start(self, timeout: int = 120) -> None:
        """
        Start the server. Note: slow and blocking request.
        The API waits for confirmation from UpCloud's IaaS backend before responding.
        """
        path = f'/server/{self.uuid}/start'
        self.cloud_manager.api.post_request(path, timeout=timeout)
        object.__setattr__(self, 'state', 'started')
    def restart(self, hard: bool = False, timeout: int = 30, force: bool = True) -> None:
        """
        Restart the server. By default, issue a soft restart with a timeout of 30s
        and a hard restart after the timeout.
        After the timeout a hard restart is performed if the server has not stopped.
        Note: API responds immediately (unlike in start), with state: started.
        This client will, however, set state as 'maintenance' to signal that the server is neither
        started nor stopped.
        """
        body = dict()
        body['restart_server'] = {
            'stop_type': 'hard' if hard else 'soft',
            'timeout': f'{timeout}',
            'timeout_action': 'destroy' if force else 'ignore',
        }
        path = f'/server/{self.uuid}/restart'
        self.cloud_manager.api.post_request(path, body)
        object.__setattr__(self, 'state', 'maintenance')
    def add_ip(self, family: str = 'IPv4') -> IPAddress:
        """
        Allocate a new (random) IP-address to the Server.
        """
        IP = self.cloud_manager.attach_ip(self.uuid, family)
        self.ip_addresses.append(IP)
        return IP
    def remove_ip(self, ip_address: IPAddress) -> None:
        """
        Release the specified IP-address from the server.
        """
        self.cloud_manager.release_ip(ip_address.address)
        self.ip_addresses.remove(ip_address)
    def add_storage(
        self,
        storage: Optional[Storage] = None,  # TODO: this probably shouldn't be optional
        type: str = 'disk',
        address=None,
    ) -> None:
        """
        Attach the given storage to the Server.
        Default address is next available.
        """
        self.cloud_manager.attach_storage(
            server=self.uuid, storage=storage.uuid, storage_type=type, address=address
        )
        # mirror the attachment details on the local Storage object
        storage.address = address
        storage.type = type
        self.storage_devices.append(storage)
    def remove_storage(self, storage: Storage) -> None:
        """
        Remove Storage from a Server.
        The Storage must be a reference to an object in
        Server.storage_devices or the method will throw and Exception.
        A Storage from get_storage(uuid) will not work as it is missing the 'address' property.
        """
        if not hasattr(storage, 'address'):
            raise Exception(
                'Storage does not have an address. '
                'Access the Storage via Server.storage_devices '
                'so they include an address. '
                '(This is due how the API handles Storages)'
            )
        self.cloud_manager.detach_storage(server=self.uuid, address=storage.address)
        self.storage_devices.remove(storage)
    def add_firewall_rule(self, firewall_rule: FirewallRule) -> FirewallRule:
        """
        Add the specified FirewallRule to this server.
        Returns a FirewallRule instance that is associated with this server instance.
        Instantly calls the API, no need to call .save(). This is because firewall can not
        be configured with the same request as the rest of the Server.
        """
        return self.cloud_manager.create_firewall_rule(self, firewall_rule.to_dict())
    def remove_firewall_rule(self, firewall_rule):
        """
        Remove a firewall rule.
        """
        return firewall_rule.destroy()
    def get_firewall_rules(self):
        """
        Return all FirewallRule instances that are associated with this server instance.
        """
        return self.cloud_manager.get_firewall_rules(self)
    def add_tags(self, tags):
        """
        Add tags to a server. Accepts tags as strings or Tag objects.
        """
        if self.cloud_manager.assign_tags(self.uuid, tags):
            tags = self.tags + [str(tag) for tag in tags]
            object.__setattr__(self, 'tags', tags)
    def remove_tags(self, tags):
        """
        Remove tags from a server. Accepts tags as strings or Tag objects.
        """
        if self.cloud_manager.remove_tags(self, tags):
            new_tags = [tag for tag in self.tags if tag not in tags]
            object.__setattr__(self, 'tags', new_tags)
    #
    # Helper and convenience functions.
    # May perform several API requests and contain more complex logic.
    #
    def configure_firewall(self, FirewallRules):
        """
        Helper function for automatically adding several FirewallRules in series.
        """
        # NOTE(review): the comprehension variable shadows the imported
        # FirewallRule class; harmless (comprehension scope), but confusing.
        firewall_rule_bodies = [FirewallRule.to_dict() for FirewallRule in FirewallRules]
        return self.cloud_manager.configure_firewall(self, firewall_rule_bodies)
    def prepare_post_body(self):
        """
        Prepare a JSON serializable dict from a Server instance with nested.
        Storage instances.
        """
        body = dict()
        # mandatory
        body['server'] = {
            'hostname': self.hostname,
            'zone': self.zone,
            'title': self.title,
            'storage_devices': {},
        }
        # optional fields
        for optional_field in self.optional_fields:
            if hasattr(self, optional_field):
                body['server'][optional_field] = getattr(self, optional_field)
        # set password_delivery default as 'none' to prevent API from sending
        # emails (with credentials) about each created server
        if not hasattr(self, 'password_delivery'):
            body['server']['password_delivery'] = 'none'
        # collect storage devices and create a unique title (see: Storage.title in API doc)
        # for each of them
        body['server']['storage_devices'] = {'storage_device': []}
        storage_title_id = 0  # running number for unique storage titles
        for storage in self.storage_devices:
            # only non-OS disks get a running number
            if not hasattr(storage, 'os') or storage.os is None:
                storage_title_id += 1
            storage_body = storage.to_dict()
            # setup default titles for storages unless the user has specified
            # them at storage.title
            if not hasattr(storage, 'title') or not storage.title:
                if hasattr(storage, 'os') and storage.os:
                    storage_body['title'] = self.hostname + ' OS disk'
                else:
                    storage_body['title'] = (
                        self.hostname + ' storage disk ' + str(storage_title_id)
                    )
            # figure out the storage `action` parameter
            # public template
            if hasattr(storage, 'os') and storage.os:
                storage_body['action'] = 'clone'
                storage_body['storage'] = storage.os
            # private template
            elif hasattr(storage, 'uuid'):
                storage_body['action'] = 'clone'
                storage_body['storage'] = storage.uuid
            # create a new storage
            else:
                storage_body['action'] = 'create'
            body['server']['storage_devices']['storage_device'].append(storage_body)
        if hasattr(self, 'ip_addresses') and self.ip_addresses:
            body['server']['ip_addresses'] = {
                'ip_address': [ip.to_dict() for ip in self.ip_addresses]
            }
        return body
    def to_dict(self):
        """
        Prepare a JSON serializable dict for read-only purposes.
        Includes storages and IP-addresses.
        Use prepare_post_body for POST and .save() for PUT.
        """
        fields = dict(vars(self).items())
        if self.populated:
            # flatten sub-objects into plain dicts
            fields['ip_addresses'] = []
            fields['storage_devices'] = []
            for ip in self.ip_addresses:
                fields['ip_addresses'].append(
                    {'address': ip.address, 'access': ip.access, 'family': ip.family}
                )
            for storage in self.storage_devices:
                fields['storage_devices'].append(
                    {
                        'address': storage.address,
                        'storage': storage.uuid,
                        'storage_size': storage.size,
                        'storage_title': storage.title,
                        'type': storage.type,
                    }
                )
        # drop non-serializable meta fields
        del fields['populated']
        del fields['cloud_manager']
        return fields
    # TODO: strict is unused?
    def get_ip(self, access='public', addr_family=None, strict=None):
        """
        Return the server's IP address.
        Params:
        - addr_family: IPv4, IPv6 or None. None prefers IPv4 but will
          return IPv6 if IPv4 addr was not available.
        - access: 'public' or 'private'
        """
        if addr_family not in ['IPv4', 'IPv6', None]:
            raise Exception("`addr_family` must be 'IPv4', 'IPv6' or None")
        if access not in ['private', 'public']:
            raise Exception("`access` must be 'public' or 'private'")
        # lazily fetch the addresses from the API if not yet synced
        if not hasattr(self, 'ip_addresses'):
            self.populate()
        # server can have several public or private IPs
        ip_addrs = [ip_addr for ip_addr in self.ip_addresses if ip_addr.access == access]
        # prefer addr_family (or IPv4 if none given)
        preferred_family = addr_family if addr_family else 'IPv4'
        for ip_addr in ip_addrs:
            if ip_addr.family == preferred_family:
                return ip_addr.address
        # any IP (of the right access) will do if available and addr_family is None
        return ip_addrs[0].address if ip_addrs and not addr_family else None
    def get_public_ip(self, addr_family=None, *args, **kwargs):
        """Alias for get_ip('public')"""
        return self.get_ip('public', addr_family, *args, **kwargs)
    def get_private_ip(self, addr_family=None, *args, **kwargs):
        """Alias for get_ip('private')"""
        return self.get_ip('private', addr_family, *args, **kwargs)
    def _wait_for_state_change(self, target_states, update_interval=10):
        """
        Blocking wait until target_state reached. update_interval is in seconds.
        Warning: state change must begin before calling this method.
        """
        while self.state not in target_states:
            if self.state == 'error':
                raise Exception('server is in error state')
            # update server state every 10s
            sleep(update_interval)
            self.populate()
    def ensure_started(self):
        """
        Start a server and waits (blocking wait) until it is fully started.
        """
        # server is either starting or stopping (or error)
        if self.state in ['maintenance', 'error']:
            self._wait_for_state_change(['stopped', 'started'])
        if self.state == 'stopped':
            self.start()
            self._wait_for_state_change(['started'])
        if self.state == 'started':
            return True
        else:
            # something went wrong, fail explicitly
            raise Exception('unknown server state: ' + self.state)
    def stop_and_destroy(self, sync=True):
        """
        Destroy a server and its storages. Stops the server before destroying.
        Syncs the server state from the API, use sync=False to disable.
        """
        def _self_destruct():
            """destroy the server and all storages attached to it."""
            # try_it_n_times util is used as a convenience because
            # Servers and Storages can fluctuate between "maintenance" and their
            # original state due to several different reasons especially when
            # destroying infrastructure.
            # first destroy server
            try_it_n_times(
                operation=self.destroy,
                expected_error_codes=['SERVER_STATE_ILLEGAL'],
                custom_error='destroying server failed',
            )
            # storages may be deleted instantly after server DELETE
            for storage in self.storage_devices:
                try_it_n_times(
                    operation=storage.destroy,
                    expected_error_codes=['STORAGE_STATE_ILLEGAL'],
                    custom_error='destroying storage failed',
                )
        if sync:
            self.populate()
        # server is either starting or stopping (or error)
        if self.state in ['maintenance', 'error']:
            self._wait_for_state_change(['stopped', 'started'])
        if self.state == 'started':
            try_it_n_times(
                operation=self.stop,
                expected_error_codes=['SERVER_STATE_ILLEGAL'],
                custom_error='stopping server failed',
            )
            self._wait_for_state_change(['stopped'])
        if self.state == 'stopped':
            _self_destruct()
        else:
            raise Exception('unknown server state: ' + self.state)
    @classmethod
    def _handle_server_subobjs(cls, server, cloud_manager):
        """Convert raw ip/storage/tag sub-dicts of an API response in place
        into IPAddress / Storage objects and a plain tag list."""
        ip_data = server.pop('ip_addresses', None)
        storage_data = server.pop('storage_devices', None)
        tags = server.pop('tags', None)
        if ip_data:
            ip_addresses = IPAddress._create_ip_address_objs(ip_data, cloud_manager=cloud_manager)
            server['ip_addresses'] = ip_addresses
        if storage_data:
            storages = Storage._create_storage_objs(storage_data, cloud_manager=cloud_manager)
            server['storage_devices'] = storages
        if tags and 'tag' in tags:
            server['tags'] = tags['tag']
    @classmethod
    def _create_server_obj(cls, server, cloud_manager):
        """Build a Server from an API response dict, wiring in the cloud_manager."""
        cls._handle_server_subobjs(server, cloud_manager)
        server_dict = dict()
        server_dict.update(server)
        server_dict['cloud_manager'] = cloud_manager
        return Server(**server_dict)
|
|
import pytest
try:
from unittest.mock import Mock
except ImportError: # pragma: no cover
from mock import Mock
import collections
import openpathsampling as paths
from openpathsampling.tests.test_helpers import make_1d_traj
from openpathsampling.tests.analysis.utils.fixture_classes import (
TPSSystemFixture
)
from openpathsampling.tests.analysis.utils.mock_movers import *
from openpathsampling.tests.analysis.utils.mock_movers import (
_select_by_input_ensembles, _get_only
)
def test_get_only():
    """_get_only returns the single element matching the condition."""
    candidates = ['foo', 'bar', 'baz', 'qux']
    match = _get_only(iterable=candidates,
                      condition=lambda s: s.startswith('f'),
                      error_msg="string starting with f")
    assert match == 'foo'
def test_get_only_error():
    """_get_only raises when the condition matches zero or several items."""
    candidates = ['foo', 'bar', 'baz', 'qux']
    # 'bar' and 'baz' both match: too many
    with pytest.raises(AnalysisTestSetupError, match="found 2"):
        _get_only(candidates,
                  condition=lambda s: s.startswith('b'),
                  error_msg="string starting with b")
    # nothing starts with 'z': too few
    with pytest.raises(AnalysisTestSetupError, match="found 0"):
        _get_only(candidates,
                  condition=lambda s: s.startswith('z'),
                  error_msg="string starting with z")
@pytest.mark.parametrize('ensemble', [None, 'ensemble'])
def test_select_by_input_ensembles(default_unidirectional_tis, ensemble):
    """Selection is deterministic for a given ensemble, arbitrary for None."""
    movers = default_unidirectional_tis.scheme.movers['shooting']
    if ensemble == 'ensemble':
        ensemble = movers[0].input_ensembles[0]
    picked = _select_by_input_ensembles(movers, ensemble)
    assert picked in movers  # this is enough for ensemble=None
    if ensemble is not None:
        assert picked == movers[0]
def test_random_choice_mover(default_unidirectional_tis):
    """MockRandomChoiceMover nests the target change under the chooser."""
    scheme = default_unidirectional_tis.scheme
    shooter = scheme.movers['shooting'][0]
    target_change = Mock(mover=shooter)
    groups = [group for group in scheme.root_mover.submovers
              if shooter in group.submovers]
    assert len(groups) == 1
    chooser = groups[0]
    assert chooser.name == "ShootingChooser"  # make sure we got the right one
    mock_chooser = MockRandomChoiceMover(chooser, target_change)
    change = mock_chooser('foo')  # exact inputs are ignored
    assert change.mover is chooser
    assert change.subchanges[0] is target_change
def test_random_choice_mover_error(default_unidirectional_tis):
    """Error when the target change's mover is not under the chooser."""
    scheme = default_unidirectional_tis.scheme
    repex_mover = scheme.movers['repex'][0]
    target_change = Mock(mover=repex_mover)
    shoot_mover = scheme.movers['shooting'][0]
    groups = [group for group in scheme.root_mover.submovers
              if shoot_mover in group.submovers]
    assert len(groups) == 1
    chooser = groups[0]
    assert chooser.name == "ShootingChooser"  # make sure we got the right one
    mock_chooser = MockRandomChoiceMover(chooser, target_change)
    with pytest.raises(AnalysisTestSetupError, match="not found as sub"):
        mock_chooser('foo')
def _setup_one_way_forward(scheme, accepted):
    """Return (ensemble, partial_traj) for a forward one-way shooting test."""
    partial_traj = TPSSystemFixture.make_trajectory(-1, 2).reversed
    # * ensemble 0 is always accepted because the trial trajectory is
    #   shorter than the input trajectory
    # * ensemble 2 is always rejected because the trial trajectory doesn't
    #   cross the interface (therefore doesn't satisfy the ensemble)
    idx = 0 if accepted else 2
    return scheme.network.sampling_ensembles[idx], partial_traj
def _setup_one_way_backward(scheme, accepted):
    """Return (ensemble, partial_traj) for a backward one-way shooting test."""
    ensemble = scheme.network.sampling_ensembles[2]
    make_trajectory = TPSSystemFixture.make_trajectory
    # * the accepted trajectory always passes because the trial trajectory
    #   is shorter than the input trajectory
    # * the rejected trajectory always fails because it is B->B (does not
    #   satisfy the ensemble)
    accepted_traj = make_trajectory(-1, 1).reversed
    rejected_traj = make_trajectory(-3, 10)
    partial_traj = accepted_traj if accepted else rejected_traj
    return ensemble, partial_traj
@pytest.mark.parametrize('direction', ['forward', 'backward'])
@pytest.mark.parametrize('accepted', [True, False])
def test_one_way_shooting_move(default_unidirectional_tis, direction,
                               accepted):
    """One-way shooting mocks build correct trials in both directions."""
    scheme = default_unidirectional_tis.scheme
    t1 = default_unidirectional_tis.make_tis_trajectory(5)
    t2 = default_unidirectional_tis.make_tis_trajectory(10)
    init_conds = scheme.initial_conditions_from_trajectories([t1, t2])
    shooting_idx = 4
    # pick ensemble and partial trajectory for the requested scenario
    ensemble, partial_traj = {
        'forward': _setup_one_way_forward(scheme, accepted),
        'backward': _setup_one_way_backward(scheme, accepted)
    }[direction]
    initial_traj = init_conds[ensemble].trajectory
    # expected trial length: forward keeps frames up to and including the
    # shooting point and appends the partial; backward prepends it
    len_expected = {
        'forward': shooting_idx + 1 + len(partial_traj),
        'backward': len(partial_traj) + len(initial_traj) - shooting_idx
    }[direction]
    # index where the shooting snapshot lands in the trial trajectory
    trial_shooting_idx = {'forward': shooting_idx,
                          'backward': len(partial_traj)}[direction]
    movetype = {'forward': MockForwardShooting,
                'backward': MockBackwardShooting}[direction]
    move = movetype(shooting_index=shooting_idx,
                    partial_traj=partial_traj,
                    scheme=scheme,
                    ensemble=ensemble)
    assert move.ensemble == ensemble
    change = move(init_conds)
    # check that this looks like a move change from shooting
    assert len(change.trials) == 1
    trial_trajectory = change.trials[0].trajectory
    shooting_snapshot = change.subchanges[0].details.shooting_snapshot
    # check correctness of the results
    assert change.accepted is accepted
    assert len(trial_trajectory) == len_expected
    assert shooting_snapshot is initial_traj[shooting_idx]
    assert shooting_snapshot in trial_trajectory
    assert shooting_snapshot is trial_trajectory[trial_shooting_idx]
@pytest.mark.parametrize('accepted', [True, False, None])
def test_shooting_move_force_accept(default_unidirectional_tis, accepted):
    """The `accepted` flag forces (True/False) or delegates (None) acceptance.

    If a given trial can be either accepted or rejected, the resulting change
    depends on the `accepted` parameter of the mock move -- True accepts,
    False rejects, and None uses the default internal math.  Test this by
    trying each 25 times on a trial with a 50% acceptance probability.  Only
    the forward shooting mover is tested, since the logic is shared with
    backward.
    """
    scheme = default_unidirectional_tis.scheme
    init_traj = default_unidirectional_tis.make_tis_trajectory(10)
    partial_traj = make_1d_traj([9.5] * (len(init_traj)-2) + [10.5])
    init_conds = scheme.initial_conditions_from_trajectories(init_traj)
    ensemble = scheme.network.sampling_ensembles[2]
    shooting_idx = len(init_traj) - 2
    move = MockForwardShooting(shooting_index=shooting_idx,
                               partial_traj=partial_traj,
                               accepted=accepted,
                               scheme=scheme,
                               ensemble=ensemble)
    n_attempts = 25
    changes = [move(init_conds) for _ in range(n_attempts)]
    # TODO: add assert to check that the pick probability is as expected?
    results = collections.Counter(change.accepted for change in changes)
    if accepted is not None:
        assert results[accepted] == n_attempts
        # BUGFIX: this was `assert not accepted not in results`, which parses
        # as `accepted in results` -- a tautology given the line above.  The
        # intent is that the *opposite* outcome never occurs.
        assert (not accepted) not in results
    else:
        assert results[True] > 0
        assert results[False] > 0
        assert results[True] + results[False] == n_attempts
def test_reject_nonsense_forced_acceptance(default_unidirectional_tis):
    """Forcing acceptance of an impossible trial must raise, not succeed.

    If a user requires that the shooting move be accepted, but the given
    trajectory cannot be accepted because it doesn't match the ensemble,
    we should raise an error.
    """
    scheme = default_unidirectional_tis.scheme
    init_traj = default_unidirectional_tis.make_tis_trajectory(10)
    init_conds = scheme.initial_conditions_from_trajectories(init_traj)
    # a setup that should never be accepted (trial fails the ensemble)
    ensemble, partial_traj = _setup_one_way_backward(scheme, accepted=False)
    mover = MockBackwardShooting(shooting_index=4,
                                 partial_traj=partial_traj,
                                 accepted=True,
                                 scheme=scheme,
                                 ensemble=ensemble)
    with pytest.raises(AnalysisTestSetupError, match="force acceptance"):
        mover(init_conds)
@pytest.mark.parametrize('accepted', [True, False])
def test_repex_move(default_unidirectional_tis, accepted):
    """Mock replica exchange gives the expected accept/reject outcome."""
    scheme = default_unidirectional_tis.scheme
    t1 = default_unidirectional_tis.make_tis_trajectory(4)
    t2 = default_unidirectional_tis.make_tis_trajectory(10)
    init_conds = scheme.initial_conditions_from_trajectories([t1, t2])
    # sanity-check how the initial conditions were assigned
    assert init_conds[0].trajectory is t1 and init_conds[1].trajectory is t1
    assert init_conds[2].trajectory is t2
    if accepted:
        e2, e1 = scheme.network.sampling_ensembles[:2]
    else:
        e2, e1 = scheme.network.sampling_ensembles[1:]
    mover = MockRepex(scheme, (e1, e2))
    change = mover(init_conds)
    assert change.accepted is accepted
    assert isinstance(change.canonical.mover, paths.ReplicaExchangeMover)
@pytest.mark.parametrize('accepted', [True, False])
def test_mock_pathreversal(default_unidirectional_tis, accepted):
    """Mock path reversal accepts/rejects based on the input trajectory."""
    scheme = default_unidirectional_tis.scheme
    mover = MockPathReversal(scheme)
    maxval = 6 if accepted else 10
    init_traj = default_unidirectional_tis.make_tis_trajectory(maxval)
    init_conds = scheme.initial_conditions_from_trajectories(init_traj)
    change = mover(init_conds)
    assert change.accepted is accepted
    assert isinstance(change.canonical.mover, paths.PathReversalMover)
@pytest.mark.parametrize('accepted', [True, False])
def test_wrap_org_by_group(default_unidirectional_tis, accepted):
    """wrap_org_by_group nests a canonical change under the root mover."""
    # the specific mover doesn't matter; path reversal is easy
    scheme = default_unidirectional_tis.scheme
    mover = MockPathReversal(scheme)
    maxval = 6 if accepted else 10
    init_traj = default_unidirectional_tis.make_tis_trajectory(maxval)
    init_conds = scheme.initial_conditions_from_trajectories(init_traj)
    canonical = mover(init_conds)
    wrapped = mover.wrap_org_by_group(canonical, init_conds)
    assert wrapped.mover is scheme.root_mover
    assert wrapped.subchanges[0].mover.name == "PathreversalChooser"
    assert wrapped.canonical is canonical
    assert wrapped.subchanges[0].subchanges[0] is canonical
@pytest.mark.parametrize('accepted', [True, False])
def test_run_moves_single(default_unidirectional_tis, accepted):
    """A single step gives an MCStep whose active set matches accept/reject."""
    scheme = default_unidirectional_tis.scheme
    make_tis_trajectory = default_unidirectional_tis.make_tis_trajectory
    trajs = {True: make_tis_trajectory(5),
             False: make_tis_trajectory(10)}
    traj = trajs[accepted]
    init_conds = scheme.initial_conditions_from_trajectories(traj)
    ensemble = scheme.network.sampling_ensembles[0]
    mover = MockPathReversal(scheme, ensemble=ensemble)
    steps = list(run_moves(init_conds, [mover]))
    assert len(steps) == 1
    only_step = steps[0]
    assert isinstance(only_step, paths.MCStep)
    assert isinstance(only_step.change.canonical.mover,
                      paths.PathReversalMover)
    assert only_step.change.accepted is accepted
    if accepted:
        # accepted: the active sample set holds the reversed trajectory
        assert only_step.active[ensemble].trajectory == traj.reversed
    else:
        # rejected: the original initial sample is still active
        assert only_step.active[ensemble] is init_conds[ensemble]
def test_run_moves_multiple(default_unidirectional_tis):
    """Run three moves in sequence and verify the accept/reject pattern
    and that the final trajectory has fully decorrelated from the start."""
    scheme = default_unidirectional_tis.scheme
    make_trajectory = default_unidirectional_tis.make_trajectory
    tis_traj = default_unidirectional_tis.make_tis_trajectory(10)
    init_conds = scheme.initial_conditions_from_trajectories(tis_traj)
    ensemble = scheme.network.sampling_ensembles[2]
    mover_sequence = [
        # 1: forward shot, force-accepted (always accepted anyway)
        MockForwardShooting(shooting_index=8,
                            partial_traj=make_trajectory(8, 10),
                            accepted=True,
                            scheme=scheme,
                            ensemble=ensemble),
        # 2: backward shot, rejected (bad ensemble)
        MockBackwardShooting(shooting_index=4,
                             partial_traj=make_trajectory(3, 10),
                             scheme=scheme,
                             ensemble=ensemble),
        # 3: backward shot, force-accepted
        MockBackwardShooting(shooting_index=9,
                             partial_traj=make_trajectory(-1, 8).reversed,
                             accepted=True,
                             scheme=scheme,
                             ensemble=ensemble),
    ]
    steps = list(run_moves(init_conds, mover_sequence))
    assert len(steps) == 3
    assert [s.change.accepted for s in steps] == [True, False, True]
    initial_traj = init_conds[ensemble].trajectory
    final_traj = steps[-1].active[ensemble].trajectory
    # both shooting moves were accepted, so no frames should be shared
    assert not initial_traj.is_correlated(final_traj)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import urllib
from nose.tools import assert_true, assert_equal, assert_false
from django.utils.encoding import smart_str
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from desktop.lib.django_test_util import make_logged_in_client, assert_equal_mod_whitespace
from desktop.lib.test_utils import add_permission, grant_access
from hadoop.pseudo_hdfs4 import is_live_cluster
from metastore import parser
from useradmin.models import HuePermission, GroupPermission, group_has_permission
from beeswax.conf import LIST_PARTITIONS_LIMIT
from beeswax.views import collapse_whitespace
from beeswax.test_base import make_query, wait_for_query_to_finish, verify_history, get_query_server_config, fetch_query_result_data
from beeswax.models import QueryHistory
from beeswax.server import dbms
from beeswax.test_base import BeeswaxSampleProvider
LOG = logging.getLogger(__name__)
def _make_query(client, query, submission_type="Execute",
                udfs=None, settings=None, resources=None,
                wait=False, name=None, desc=None, local=True,
                is_parameterized=True, max=30.0, database='default', email_notify=False, **kwargs):
  """Wrapper around the real make_query.

  Submits ``query`` through ``client`` and, for 'Execute' submissions,
  verifies that the query shows up in the query history.

  BUGFIX: ``resources`` previously defaulted to a shared mutable list
  (``resources=[]``); any mutation by a callee would leak into later
  calls. A fresh list is now created per call.
  """
  if resources is None:
    resources = []
  res = make_query(client, query, submission_type,
                   udfs, settings, resources,
                   wait, name, desc, local, is_parameterized, max, database, email_notify, **kwargs)

  # Should be in the history if it's submitted.
  if submission_type == 'Execute':
    fragment = collapse_whitespace(smart_str(query[:20]))
    verify_history(client, fragment=fragment)

  return res
class TestMetastoreWithHadoop(BeeswaxSampleProvider):
  """End-to-end tests for the metastore app, backed by a live Hive/Hadoop.

  BUGFIX: several assertions previously passed a string as the first
  positional argument of ``assert_true`` (e.g.
  ``assert_true('test_alter_table', resp.content)``); a non-empty string is
  always truthy, so those assertions could never fail. They now perform the
  intended membership checks against the response content.
  """
  requires_hadoop = True

  def setUp(self):
    # User.objects.get() doubles as a check that the test user exists.
    user = User.objects.get(username='test')
    self.db = dbms.get(user, get_query_server_config())
    add_permission("test", "test", "write", "metastore")

  def test_basic_flow(self):
    # Default database should exist
    response = self.client.get("/metastore/databases")
    assert_true(self.db_name in response.context["databases"])

    # Table should have been created
    response = self.client.get("/metastore/tables/")
    assert_equal(200, response.status_code)

    # Switch databases
    response = self.client.get("/metastore/tables/%s?format=json" % self.db_name)
    data = json.loads(response.content)
    assert_true('name' in data["tables"][0])
    assert_true("test" in data["table_names"])

    # Should default to "default" database
    response = self.client.get("/metastore/tables/not_there")
    assert_equal(200, response.status_code)

    # And have detail
    response = self.client.get("/metastore/table/%s/test?format=json" % self.db_name)
    data = json.loads(response.content)
    assert_true("foo" in [col['name'] for col in data['cols']])
    assert_true("SerDe Library:" in [prop['col_name'] for prop in data['properties']], data)

    # Remember the number of history items. Use a generic fragment 'test' to pass verification.
    history_cnt = verify_history(self.client, fragment='test')

    # Show table data.
    response = self.client.get("/metastore/table/%s/test/read" % self.db_name, follow=True)
    response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
    response = wait_for_query_to_finish(self.client, response, max=30.0)

    # Note that it may not return all rows at once. But we expect at least 10.
    results = fetch_query_result_data(self.client, response)
    assert_true(len(results['results']) > 0)

    # This should NOT go into the query history.
    assert_equal(verify_history(self.client, fragment='test'), history_cnt, 'Implicit queries should not be saved in the history')

  def test_show_tables(self):
    hql = """
      CREATE TABLE test_show_tables_1 (a int) COMMENT 'Test for show_tables';
      CREATE TABLE test_show_tables_2 (a int) COMMENT 'Test for show_tables';
      CREATE TABLE test_show_tables_3 (a int) COMMENT 'Test for show_tables';
    """
    resp = _make_query(self.client, hql, database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    # Table should have been created
    response = self.client.get("/metastore/tables/%s?filter=show_tables&format=json" % self.db_name)
    assert_equal(200, response.status_code)
    data = json.loads(response.content)
    assert_equal(len(data['tables']), 3)
    assert_true('name' in data["tables"][0])
    assert_true('comment' in data["tables"][0])
    assert_true('type' in data["tables"][0])

    hql = """
      CREATE TABLE test_show_tables_4 (a int) COMMENT 'Test for show_tables';
      CREATE TABLE test_show_tables_5 (a int) COMMENT 'Test for show_tables';
    """
    resp = _make_query(self.client, hql, database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    # Table should have been created
    response = self.client.get("/metastore/tables/%s?filter=show_tables&format=json" % self.db_name)
    assert_equal(200, response.status_code)
    data = json.loads(response.content)
    assert_equal(len(data['tables']), 5)
    assert_true('name' in data["tables"][0])
    assert_true('comment' in data["tables"][0])
    assert_true('type' in data["tables"][0])

    hql = """
      CREATE INDEX test_index ON TABLE test_show_tables_1 (a) AS 'COMPACT' WITH DEFERRED REBUILD;
    """
    resp = _make_query(self.client, hql, wait=True, local=False, max=30.0, database=self.db_name)

    # By default, index table should not appear in show tables view
    response = self.client.get("/metastore/tables/%s?format=json" % self.db_name)
    assert_equal(200, response.status_code)
    data = json.loads(response.content)
    assert_false('test_index' in data['tables'])

  def test_describe_view(self):
    resp = self.client.get('/metastore/table/%s/myview?format=json' % self.db_name)
    assert_equal(200, resp.status_code, resp.content)
    data = json.loads(resp.content)
    assert_true(data['is_view'])
    assert_equal("myview", data['name'])

  def test_describe_partitions(self):
    response = self.client.get("/metastore/table/%s/test_partitions?format=json" % self.db_name)
    data = json.loads(response.content)
    assert_equal(2, len(data['partition_keys']), data)

    response = self.client.get("/metastore/table/%s/test_partitions/partitions?format=json" % self.db_name, follow=True)
    data = json.loads(response.content)
    partition_columns = [col for cols in data['partition_values_json'] for col in cols['columns']]
    assert_true("baz_one" in partition_columns)
    assert_true("boom_two" in partition_columns)
    assert_true("baz_foo" in partition_columns)
    assert_true("boom_bar" in partition_columns)

    # Not partitioned
    response = self.client.get("/metastore/table/%s/test/partitions" % self.db_name, follow=True)
    assert_true("is not partitioned." in response.content)

  def test_describe_partitioned_table_with_limit(self):
    # We have 2 partitions in the test table
    finish = LIST_PARTITIONS_LIMIT.set_for_testing("1")
    try:
      response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
      partition_values_json = json.loads(response.context['partition_values_json'])
      assert_equal(1, len(partition_values_json))
    finally:
      finish()

    finish = LIST_PARTITIONS_LIMIT.set_for_testing("3")
    try:
      response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
      partition_values_json = json.loads(response.context['partition_values_json'])
      assert_equal(2, len(partition_values_json))
    finally:
      finish()

  def test_read_partitions(self):
    partition_spec = "baz='baz_one',boom='boom_two'"
    response = self.client.get("/metastore/table/%s/test_partitions/partitions/%s/read" % (self.db_name, partition_spec), follow=True)
    response = self.client.get(reverse("beeswax:api_watch_query_refresh_json", kwargs={'id': response.context['query'].id}), follow=True)
    response = wait_for_query_to_finish(self.client, response, max=30.0)
    results = fetch_query_result_data(self.client, response)
    assert_true(len(results['results']) > 0, results)

  def test_browse_partition(self):
    partition_spec = "baz='baz_one',boom='boom_two'"
    response = self.client.get("/metastore/table/%s/test_partitions/partitions/%s/browse" % (self.db_name, partition_spec), follow=True)
    if is_live_cluster():
      path = '/user/hive/warehouse/%s.db/test_partitions/baz=baz_one/boom=boom_two' % self.db_name
    else:
      path = '/user/hive/warehouse/test_partitions/baz=baz_one/boom=boom_two'
    filebrowser_path = urllib.unquote(reverse("filebrowser.views.view", kwargs={'path': path}))
    assert_equal(response.request['PATH_INFO'], filebrowser_path)

  def test_drop_partition(self):
    # Create partition first
    partition_spec = "baz='baz_drop',boom='boom_drop'"
    hql = 'ALTER TABLE `%s`.`test_partitions` ADD IF NOT EXISTS PARTITION (%s);' % (self.db_name, partition_spec)
    resp = _make_query(self.client, hql, database=self.db_name)
    wait_for_query_to_finish(self.client, resp, max=30.0)

    # Assert partition exists
    response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
    assert_true("baz_drop" in response.content)

    # Drop partition
    self.client.post("/metastore/table/%s/test_partitions/partitions/drop" % self.db_name, {'partition_selection': [partition_spec]}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace("ALTER TABLE `%s`.`test_partitions` DROP IF EXISTS PARTITION (%s) PURGE" % (self.db_name, partition_spec), query.query)
    response = self.client.get("/metastore/table/%s/test_partitions/partitions" % self.db_name)
    assert_false("baz_drop" in response.content)

  def test_drop_multi_tables(self):
    hql = """
      CREATE TABLE test_drop_1 (a int);
      CREATE TABLE test_drop_2 (a int);
      CREATE TABLE test_drop_3 (a int);
    """
    resp = _make_query(self.client, hql, database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    # Drop them
    resp = self.client.get('/metastore/tables/drop/%s' % self.db_name, follow=True)
    assert_true('want to delete' in resp.content, resp.content)
    resp = self.client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_drop_1', u'test_drop_2', u'test_drop_3']})
    assert_equal(resp.status_code, 302)

  def test_drop_multi_databases(self):
    db1 = '%s_test_drop_1' % self.db_name
    db2 = '%s_test_drop_2' % self.db_name
    db3 = '%s_test_drop_3' % self.db_name

    try:
      hql = """
        CREATE DATABASE %(db1)s;
        CREATE DATABASE %(db2)s;
        CREATE DATABASE %(db3)s;
      """ % {'db1': db1, 'db2': db2, 'db3': db3}
      resp = _make_query(self.client, hql)
      resp = wait_for_query_to_finish(self.client, resp, max=30.0)

      # Drop them
      resp = self.client.get('/metastore/databases/drop', follow=True)
      assert_true('want to delete' in resp.content, resp.content)
      resp = self.client.post('/metastore/databases/drop', {u'database_selection': [db1, db2, db3]})
      assert_equal(resp.status_code, 302)
    finally:
      make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db1}, wait=True)
      make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db2}, wait=True)
      make_query(self.client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db3}, wait=True)

  def test_load_data(self):
    """
    Test load data queries.
    These require Hadoop, because they ask the metastore
    about whether a table is partitioned.
    """
    # Check that view works
    resp = self.client.get("/metastore/table/%s/test/load" % self.db_name, follow=True)
    assert_true('Path' in resp.content)

    data_path = '%(prefix)s/tmp/foo' % {'prefix': self.cluster.fs_prefix}

    # Try the submission
    self.client.post("/metastore/table/%s/test/load" % self.db_name, {'path': data_path, 'overwrite': True}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace("LOAD DATA INPATH '%(data_path)s' OVERWRITE INTO TABLE `%(db)s`.`test`" % {'data_path': data_path, 'db': self.db_name}, query.query)

    resp = self.client.post("/metastore/table/%s/test/load" % self.db_name, {'path': data_path, 'overwrite': False}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace("LOAD DATA INPATH '%(data_path)s' INTO TABLE `%(db)s`.`test`" % {'data_path': data_path, 'db': self.db_name}, query.query)

    # Try it with partitions
    resp = self.client.post("/metastore/table/%s/test_partitions/load" % self.db_name, {'path': data_path, 'partition_0': "alpha", 'partition_1': "beta"}, follow=True)
    query = QueryHistory.objects.latest('id')
    assert_equal_mod_whitespace(query.query, "LOAD DATA INPATH '%(data_path)s' INTO TABLE `%(db)s`.`test_partitions` PARTITION (baz='alpha', boom='beta')" % {'data_path': data_path, 'db': self.db_name})

  def test_has_write_access_frontend(self):
    client = make_logged_in_client(username='write_access_frontend', groupname='write_access_frontend', is_superuser=False)
    grant_access("write_access_frontend", "write_access_frontend", "metastore")
    # implicitly asserts that the user was created by make_logged_in_client
    user = User.objects.get(username='write_access_frontend')

    def check(client, assertz):
      response = client.get("/metastore/databases")
      assertz("Drop</button>" in response.content, response.content)
      assertz("Create a new database" in response.content, response.content)

      response = client.get("/metastore/tables/")
      assertz("Drop</button>" in response.content, response.content)
      assertz("Create a new table" in response.content, response.content)

    check(client, assert_false)

    # Add access
    group, created = Group.objects.get_or_create(name='write_access_frontend')
    perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
    GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

    check(client, assert_true)

  def test_has_write_access_backend(self):
    client = make_logged_in_client(username='write_access_backend', groupname='write_access_backend', is_superuser=False)
    grant_access("write_access_backend", "write_access_backend", "metastore")
    grant_access("write_access_backend", "write_access_backend", "beeswax")
    # implicitly asserts that the user was created by make_logged_in_client
    user = User.objects.get(username='write_access_backend')

    resp = _make_query(client, 'CREATE TABLE test_perm_1 (a int);', database=self.db_name) # Only fails if we were using Sentry and won't allow SELECT to user
    resp = wait_for_query_to_finish(client, resp, max=30.0)

    def check(client, http_codes):
      resp = client.get('/metastore/tables/drop/%s' % self.db_name)
      assert_true(resp.status_code in http_codes, resp.content)

      resp = client.post('/metastore/tables/drop/%s' % self.db_name, {u'table_selection': [u'test_perm_1']})
      assert_true(resp.status_code in http_codes, resp.content)

    check(client, [301]) # Denied

    # Add access
    group, created = Group.objects.get_or_create(name='write_access_backend')
    perm, created = HuePermission.objects.get_or_create(app='metastore', action='write')
    GroupPermission.objects.get_or_create(group=group, hue_permission=perm)

    check(client, [200, 302]) # Ok

  def test_alter_table(self):
    resp = _make_query(self.client, "CREATE TABLE test_alter_table (a int) COMMENT 'Before Alter';", database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    resp = self.client.get('/metastore/table/%s/test_alter_table' % self.db_name)
    # BUGFIX: these were assert_true(<string>, resp.content), which can
    # never fail; check the page content for real.
    assert_true('test_alter_table' in resp.content, resp.content)
    assert_true('Before Alter' in resp.content, resp.content)

    # Alter name
    resp = self.client.post(reverse("metastore:alter_table",
                                    kwargs={'database': self.db_name, 'table': 'test_alter_table'}),
                            {'new_table_name': 'table_altered'})
    json_resp = json.loads(resp.content)
    assert_equal('table_altered', json_resp['data']['name'], json_resp)

    # Alter comment
    resp = self.client.post(reverse("metastore:alter_table",
                                    kwargs={'database': self.db_name, 'table': 'table_altered'}),
                            {'comment': 'After Alter'})
    json_resp = json.loads(resp.content)
    assert_equal('After Alter', json_resp['data']['comment'], json_resp)

    # Invalid table name returns error response
    resp = self.client.post(reverse("metastore:alter_table",
                                    kwargs={'database': self.db_name, 'table': 'table_altered'}),
                            {'new_table_name': 'bad name'})
    json_resp = json.loads(resp.content)
    assert_equal(1, json_resp['status'], json_resp)
    assert_true('Failed to alter table' in json_resp['data'], json_resp)

  def test_alter_column(self):
    resp = _make_query(self.client, 'CREATE TABLE test_alter_column (before_alter int);', database=self.db_name)
    resp = wait_for_query_to_finish(self.client, resp, max=30.0)

    resp = self.client.get('/metastore/table/%s/test_alter_column' % self.db_name)
    # BUGFIX: these were assert_true(<string>, resp.content), which can
    # never fail; check the page content for real.
    assert_true('before_alter' in resp.content, resp.content)
    assert_true('int' in resp.content, resp.content)

    # Alter name, type and comment
    resp = self.client.post(reverse("metastore:alter_column",
                                    kwargs={'database': self.db_name, 'table': 'test_alter_column'}),
                            {'column': 'before_alter', 'new_column_name': 'after_alter', 'new_column_type': 'string', 'comment': 'alter comment'})
    json_resp = json.loads(resp.content)
    assert_equal('after_alter', json_resp['data']['name'], json_resp)
    assert_equal('string', json_resp['data']['type'], json_resp)
    assert_equal('alter comment', json_resp['data']['comment'], json_resp)

    # Invalid column type returns error response
    resp = self.client.post(reverse("metastore:alter_column",
                                    kwargs={'database': self.db_name, 'table': 'test_alter_column'}),
                            {'column': 'before_alter', 'new_column_name': 'foo'})
    json_resp = json.loads(resp.content)
    assert_equal(1, json_resp['status'], json_resp)
    assert_true('Failed to alter column' in json_resp['message'], json_resp)
class TestParser(object):
  """Tests for metastore.parser's Hive column-type parsing.

  Cleaned up: the original used a local variable named ``type``, shadowing
  the builtin; the repeated parse-and-compare pattern is factored into a
  single private helper.
  """

  def _verify(self, name, hive_type, comment, expected):
    # Parse one column definition and compare the full parse tree.
    parse_tree = parser.parse_column(name, hive_type, comment)
    assert_equal(parse_tree, expected)

  def test_parse_simple(self):
    # A primitive type is passed through unchanged.
    self._verify('simple', 'string', 'test_parse_simple',
                 {'name': 'simple', 'type': 'string',
                  'comment': 'test_parse_simple'})

  def test_parse_decimal(self):
    # Parametrized decimal keeps its precision/scale spec intact.
    self._verify('simple', 'decimal(12,2)', 'test_parse_decimal',
                 {'name': 'simple', 'type': 'decimal(12,2)',
                  'comment': 'test_parse_decimal'})

  def test_parse_array(self):
    self._verify('array', 'array<string>', 'test_parse_array',
                 {'name': 'array', 'type': 'array',
                  'comment': 'test_parse_array',
                  'item': {'type': 'string'}})

  def test_parse_map(self):
    self._verify('map', 'map<string,int>', 'test_parse_map',
                 {'name': 'map', 'type': 'map', 'comment': 'test_parse_map',
                  'key': {'type': 'string'}, 'value': {'type': 'int'}})

  def test_parse_struct(self):
    self._verify('struct', 'struct<name:string,age:int>', 'test_parse_struct',
                 {'name': 'struct', 'type': 'struct',
                  'comment': 'test_parse_struct',
                  'fields': [{'name': 'name', 'type': 'string'},
                             {'name': 'age', 'type': 'int'}]})

  def test_parse_nested(self):
    self._verify('nested', 'array<struct<name:string,age:int>>',
                 'test_parse_nested',
                 {'name': 'nested', 'type': 'array',
                  'comment': 'test_parse_nested',
                  'item': {'type': 'struct',
                           'fields': [{'name': 'name', 'type': 'string'},
                                      {'name': 'age', 'type': 'int'}]}})
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Graphic User Interface."""
import sys
import os
import glob
import ConfigParser
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.version import __version__
from base.labelconfig import LabelConfig
from base.utils import get_icon_dir
from component.listwidget import LayerView
from component.gridwidget import GridView
from component.orthwidget import OrthView
from component.datamodel import VolumeListModel
from component.drawsettings import PainterStatus, ViewSettings, MoveSettings
from component.binarizationdialog import BinarizationDialog
from component.intersectdialog import IntersectDialog
from component.localmaxdialog import LocalMaxDialog
from component.no_gui_tools import inverse_image, gen_label_color
from component.smoothingdialog import SmoothingDialog
from component.growdialog import GrowDialog
from component.watersheddialog import WatershedDialog
from component.slicdialog import SLICDialog
from component.clusterdialog import ClusterDialog
from component.regularroidialog import RegularROIDialog
from component.regularroifromcsvfiledialog import RegularROIFromCSVFileDialog
from component.roi2gwmidialog import Roi2gwmiDialog
from component.no_gui_tools import edge_detection
from component.roimergedialog import ROIMergeDialog
from component.opendialog import OpenDialog
from component.labelmanagedialog import LabelManageDialog
from component.labelconfigcenter import LabelConfigCenter
from component.roidialog import ROIDialog
from component.atlasdialog import AtlasDialog
from component.binaryerosiondialog import BinaryerosionDialog
from component.binarydilationdialog import BinarydilationDialog
from component.greydilationdialog import GreydilationDialog
from component.greyerosiondialog import GreyerosionDialog
from component.meants import MeanTSDialog
from component.voxelstatsdialog import VoxelStatsDialog
from component.registervolume import RegisterVolumeDialog
class BpMainWindow(QMainWindow):
"""Class BpMainWindow provides UI interface of FreeROI.
Example:
--------
>>> from PyQt4.QtGui import QApplication
>>> import main
>>> app = QApplication([])
>>> win = main.BpMainWindow()
......
>>> win.show()
>>> app.exec_()
"""
def __init__(self, parent=None):
"""Initialize an instance of BpMainWindow."""
# Inherited from QMainWindow
if sys.platform == 'darwin':
# Workaround for Qt issue on OSX that causes QMainWindow to
# hide when adding QToolBar, see
# https://bugreports.qt-project.org/browse/QTBUG-4300
super(BpMainWindow, self).__init__(parent,
Qt.MacWindowToolBarButtonHint)
else:
super(BpMainWindow, self).__init__(parent)
# temporary variable
self._temp_dir = None
self.is_save_configure = False
# pre-define a model variable
self.model = None
def config_extra_settings(self, data_dir):
"""Set data directory and update some configurations."""
# load data directory configuration
self.label_path = data_dir
self.label_config_dir = os.path.join(self.label_path, 'labelconfig')
self.label_config_suffix = 'lbl'
# set icon configuration
self._icon_dir = get_icon_dir()
# set window title
self.setWindowTitle('FreeROI')
#self.resize(1280, 1000)
self.center()
# set window icon
self.setWindowIcon(QIcon(os.path.join(self._icon_dir, 'logo.png')))
self._init_configuration()
# create actions
self._create_actions()
# create menus
self._create_menus()
def center(self):
"""Display main window in the center of screen."""
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def _init_configuration(self):
"""Load configuration for GUI."""
config_file = os.path.expanduser('~/.froi.conf')
if os.path.exists(config_file):
config = ConfigParser.RawConfigParser()
config.read(config_file)
self.window_width = config.getint('width', 'int')
self.window_height = config.getint('height', 'int')
self.orth_scale_factor = config.getint('orth_scale', 'int')
self.grid_scale_factor = config.getint('grid_scale', 'int')
self.window_xpos = config.getint('xpos', 'int')
self.window_ypos = config.getint('ypos', 'int')
self.resize(self.window_width, self.window_height)
self.move(self.window_xpos, self.window_ypos)
self.default_orth_scale_factor = float(self.orth_scale_factor) / 100
self.default_grid_scale_factor = float(self.grid_scale_factor) / 100
else:
self.setWindowState(Qt.WindowMaximized)
self.default_orth_scale_factor = 1.0
self.default_grid_scale_factor = 2.0
def _save_configuration(self):
"""Save GUI configuration to a file."""
config_file = os.path.expanduser('~/.freeroi.conf')
config = ConfigParser.RawConfigParser()
config.add_section('width')
config.add_section('height')
config.add_section('orth_scale')
config.add_section('grid_scale')
config.add_section('xpos')
config.add_section('ypos')
config.set('width', 'int', self.width())
config.set('height', 'int', self.height())
config.set('xpos', 'int', self.x())
config.set('ypos', 'int', self.y())
if hasattr(self, 'model') and isinstance(self.model, VolumeListModel):
config.set('orth_scale', 'int',
int(self.model.get_scale_factor('orth')*100))
config.set('grid_scale', 'int',
int(self.model.get_scale_factor('grid')*100))
else:
config.set('orth_scale', 'int',
int(self.default_orth_scale_factor * 100))
config.set('grid_scale', 'int',
int(self.default_grid_scale_factor * 100))
with open(config_file, 'wb') as conf:
config.write(conf)
def closeEvent(self, e):
if self.is_save_configure:
self._save_configuration()
e.accept()
def _create_actions(self):
"""Create actions."""
# create a dictionary to store actions info
self._actions = {}
# Open template action
self._actions['add_template'] = QAction(QIcon(os.path.join(
self._icon_dir, 'open.png')),
self.tr("&Open standard template"),
self)
self._actions['add_template'].setShortcut(self.tr("Ctrl+O"))
self._actions['add_template'].triggered.connect(self._add_template)
self._actions['add_template'].setEnabled(True)
# Add a new image action
self._actions['add_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'add.png')),
self.tr("&Add file ... "),
self)
self._actions['add_image'].setShortcut(self.tr("Ctrl+A"))
self._actions['add_image'].triggered.connect(self._add_image)
self._actions['add_image'].setEnabled(True)
# Remove an image
self._actions['remove_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'remove.png')),
self.tr("&Remove image"),
self)
self._actions['remove_image'].setShortcut(self.tr("Ctrl+R"))
self._actions['remove_image'].triggered.connect(self._remove_image)
self._actions['remove_image'].setEnabled(False)
# New image
self._actions['new_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'create.png')),
self.tr("&New image"),
self)
self._actions['new_image'].setShortcut(self.tr("Ctrl+N"))
self._actions['new_image'].triggered.connect(self.__new_image)
self._actions['new_image'].setEnabled(False)
# Duplicate image
self._actions['duplicate_image'] = QAction(self.tr("Duplicate"), self)
self._actions['duplicate_image'].triggered.connect(
self._duplicate_image)
self._actions['duplicate_image'].setEnabled(False)
# Save image
self._actions['save_image'] = QAction(QIcon(os.path.join(
self._icon_dir, 'save.png')),
self.tr("&Save image as..."),
self)
self._actions['save_image'].setShortcut(self.tr("Ctrl+S"))
self._actions['save_image'].triggered.connect(self._save_image)
self._actions['save_image'].setEnabled(False)
## Load Label Config
#self._actions['ld_lbl'] = QAction('Load Label', self)
#self._actions['ld_lbl'].triggered.connect(self._ld_lbl)
#self._actions['ld_lbl'].setEnabled(False)
## Load Global Label Config
#self._actions['ld_glbl'] = QAction('Load Global Label', self)
#self._actions['ld_glbl'].triggered.connect(self._ld_glbl)
#self._actions['ld_glbl'].setEnabled(False)
# Close display
self._actions['close'] = QAction(self.tr("Close"), self)
self._actions['close'].setShortcut(self.tr("Ctrl+W"))
self._actions['close'].triggered.connect(self._close_display)
self._actions['close'].setEnabled(False)
# Quit action
self._actions['quit'] = QAction(QIcon(os.path.join(
self._icon_dir, 'quit.png')),
self.tr("&Quit"),
self)
self._actions['quit'].setShortcut(self.tr("Ctrl+Q"))
self._actions['quit'].triggered.connect(self.close)
# Grid view action
self._actions['grid_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'gridview.png')),
self.tr("Lightbox"),
self)
self._actions['grid_view'].triggered.connect(self._grid_view)
self._actions['grid_view'].setEnabled(False)
# Orth view action
self._actions['orth_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'orthview.png')),
self.tr("Orthographic"),
self)
self._actions['orth_view'].triggered.connect(self._orth_view)
self._actions['orth_view'].setEnabled(False)
# return original size
self._actions['original_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'original_size.png')),
self.tr("Reset view"),
self)
self._actions['original_view'].triggered.connect(self._reset_view)
self._actions['original_view'].setEnabled(False)
# whether display the cross hover
self._actions['cross_hover_view'] = QAction(QIcon(os.path.join(
self._icon_dir, 'cross_hover_enable.png')),
self.tr("Disable cross hover"),
self)
self._actions['cross_hover_view'].triggered.connect(self._display_cross_hover)
self._actions['cross_hover_view'].setEnabled(False)
# Binaryzation view action
self._actions['binarization'] = QAction(QIcon(os.path.join(
self._icon_dir, 'binarization.png')),
self.tr("Binarization"),
self)
self._actions['binarization'].triggered.connect(self._binarization)
self._actions['binarization'].setEnabled(False)
# Intersection action
self._actions['intersect'] = QAction(QIcon(os.path.join(
self._icon_dir, 'intersect.png')),
self.tr("Intersection"),
self)
self._actions['intersect'].triggered.connect(self._intersect)
self._actions['intersect'].setEnabled(False)
# Extract mean time course
self._actions['meants'] = QAction(QIcon(os.path.join(
self._icon_dir, 'voxel_curve.png')),
self.tr("Extract Mean Time Course"),
self)
self._actions['meants'].triggered.connect(self._meants)
self._actions['meants'].setEnabled(False)
# Voxel Stats
self._actions['voxelstats'] = QAction(self.tr("Voxel number stats"),
self)
self._actions['voxelstats'].triggered.connect(self._voxelstats)
self._actions['voxelstats'].setEnabled(False)
# Local Max action
self._actions['localmax'] = QAction(QIcon(os.path.join(
self._icon_dir, 'localmax.png')),
self.tr("Local Max"),
self)
self._actions['localmax'].triggered.connect(self._local_max)
self._actions['localmax'].setEnabled(False)
# Inversion action
self._actions['inverse'] = QAction(QIcon(os.path.join(
self._icon_dir, 'inverse.png')),
self.tr("Inversion"),
self)
self._actions['inverse'].triggered.connect(self._inverse)
self._actions['inverse'].setEnabled(False)
# Smoothing action
self._actions['smoothing'] = QAction(QIcon(os.path.join(
self._icon_dir, 'smoothing.png')),
self.tr("Smoothing"),
self)
self._actions['smoothing'].triggered.connect(self._smooth)
self._actions['smoothing'].setEnabled(False)
# Region Growing action
self._actions['region_grow'] = QAction(QIcon(os.path.join(
self._icon_dir, 'grow.png')),
self.tr("Region Growing"),
self)
self._actions['region_grow'].triggered.connect(self._region_grow)
self._actions['region_grow'].setEnabled(False)
# Lable Management action
self._actions['label_management'] = QAction(self.tr("Label Management"),
self)
self._actions['label_management'].triggered.connect(self._label_manage)
self._actions['label_management'].setEnabled(False)
# Snapshot
self._actions['snapshot'] = QAction(self.tr("Snapshot"), self)
self._actions['snapshot'].triggered.connect(self._snapshot)
self._actions['snapshot'].setEnabled(False)
# Watershed action
self._actions['watershed'] = QAction(QIcon(os.path.join(
self._icon_dir, 'watershed.png')),
self.tr("Watershed"),
self)
self._actions['watershed'].triggered.connect(self._watershed)
self._actions['watershed'].setEnabled(False)
# SLIC action
self._actions['slic'] = QAction(QIcon(os.path.join(
self._icon_dir, 'slic.png')),
self.tr("SLIC"),
self)
self._actions['slic'].triggered.connect(self._slic)
self._actions['slic'].setEnabled(False)
# Cluster action
self._actions['cluster'] = QAction(QIcon(os.path.join(
self._icon_dir, 'cluster.png')),
self.tr("Cluster"),
self)
self._actions['cluster'].triggered.connect(self._cluster)
self._actions['cluster'].setEnabled(False)
# Opening
self._actions['opening'] = QAction(self.tr("Opening"), self)
self._actions['opening'].triggered.connect(self._opening)
self._actions['opening'].setEnabled(False)
# Binary_erosion view action
self._actions['binaryerosion'] = QAction(self.tr("Binary Erosion"), self)
self._actions['binaryerosion'].triggered.connect(self._binaryerosion)
self._actions['binaryerosion'].setEnabled(False)
# Binary_dilation view action
self._actions['binarydilation'] = QAction(self.tr("Binary Dilation"), self)
self._actions['binarydilation'].triggered.connect(self._binarydilation)
self._actions['binarydilation'].setEnabled(False)
# grey_erosion view action
self._actions['greyerosion'] = QAction(self.tr("Grey Erosion"), self)
self._actions['greyerosion'].triggered.connect(self._greyerosion)
self._actions['greyerosion'].setEnabled(False)
# grey_dilation view action
self._actions['greydilation'] = QAction(self.tr("Grey Dilation"), self)
self._actions['greydilation'].triggered.connect(self._greydilation)
self._actions['greydilation'].setEnabled(False)
# About software
self._actions['about_freeroi'] = QAction(self.tr("About FreeROI"), self)
self._actions['about_freeroi'].triggered.connect(self._about_freeroi)
# About Qt
self._actions['about_qt'] = QAction(QIcon(os.path.join(
self._icon_dir, 'qt.png')),
self.tr("About Qt"),
self)
self._actions['about_qt'].triggered.connect(qApp.aboutQt)
# Hand
self._actions['hand'] = QAction(QIcon(os.path.join(
self._icon_dir, 'hand.png')),
self.tr("Hand"),
self)
self._actions['hand'].triggered.connect(self._hand_enable)
self._actions['hand'].setCheckable(True)
self._actions['hand'].setChecked(False)
self._actions['hand'].setEnabled(False)
# Cursor
self._actions['cursor'] = QAction(QIcon(os.path.join(
self._icon_dir, 'cursor.png')),
self.tr("Cursor"),
self)
self._actions['cursor'].triggered.connect(self._cursor_enable)
self._actions['cursor'].setCheckable(True)
self._actions['cursor'].setChecked(True)
self._actions['cursor'].setEnabled(True)
# Edit
self._actions['edit'] = QAction(QIcon(os.path.join(
self._icon_dir, 'edit.png')),
self.tr("Edit"),
self)
self._actions['edit'].triggered.connect(self._roidialog_enable)
self._actions['edit'].setCheckable(True)
self._actions['edit'].setChecked(False)
# Undo
self._actions['undo'] = QAction(QIcon(os.path.join(
self._icon_dir, 'undo.png')),
self.tr("Undo"),
self)
self._actions['undo'].triggered.connect(self._undo)
# Redo
self._actions['redo'] = QAction(QIcon(os.path.join(
self._icon_dir, 'redo.png')),
self.tr("Redo"),
self)
self._actions['redo'].triggered.connect(self._redo)
# sphere and cube roi
self._actions['regular_roi'] = QAction(QIcon(os.path.join(
self._icon_dir, 'sphere_and_cube.png')),
self.tr("Regular ROI"),
self)
self._actions['regular_roi'].triggered.connect(self._regular_roi)
self._actions['regular_roi'].setEnabled(False)
# sphere and cube roi from csv file
self._actions['regular_roi_from_csv'] = QAction(QIcon(os.path.join(
self._icon_dir, 'sphere_and_cube.png')),
self.tr("Regular ROI From CSV File"),
self)
self._actions['regular_roi_from_csv'].triggered.connect(self._regular_roi_from_csv_file)
self._actions['regular_roi_from_csv'].setEnabled(False)
# ROI to Interface
self._actions['r2i'] = QAction(QIcon(os.path.join(
self._icon_dir, 'r2i.png')),
self.tr("ROI2Interface"),
self)
self._actions['r2i'].triggered.connect(self._r2i)
self._actions['r2i'].setEnabled(False)
# Edge detection for ROI
self._actions['edge_dete'] = QAction(QIcon(os.path.join(
self._icon_dir, 'edge_detection.png')),
self.tr("Edge Detection"),
self)
self._actions['edge_dete'].triggered.connect(self._edge_detection)
self._actions['edge_dete'].setEnabled(False)
# Atlas information
self._actions['atlas'] = QAction(QIcon(os.path.join(
self._icon_dir, 'atlas.png')),
self.tr("Candidate Label"),
self)
self._actions['atlas'].triggered.connect(self._atlas_dialog)
self._actions['atlas'].setEnabled(False)
# ROI Merging
self._actions['roi_merge'] = QAction(QIcon(os.path.join(
self._icon_dir, 'merging.png')),
self.tr("ROI Merging"),
self)
self._actions['roi_merge'].triggered.connect(self._roi_merge)
self._actions['roi_merge'].setEnabled(False)
def _add_toolbar(self):
"""Add toolbar."""
# Initialize a spinbox for zoom-scale selection
self._spinbox = QSpinBox()
self._spinbox.setMaximum(500)
self._spinbox.setMinimum(50)
self._spinbox.setSuffix('%')
self._spinbox.setSingleStep(10)
self._spinbox.setValue(self.default_grid_scale_factor * 100)
self._spinbox.valueChanged.connect(self._set_scale_factor)
# Add a toolbar
self._toolbar = self.addToolBar("Tools")
#self._toolbar.setIconSize(QSize(38,38))
# Add file actions
self._toolbar.addAction(self._actions['add_image'])
self._toolbar.addAction(self._actions['remove_image'])
self._toolbar.addAction(self._actions['new_image'])
self._toolbar.addAction(self._actions['save_image'])
# Add view actions
self._toolbar.addSeparator()
self._toolbar.addAction(self._actions['grid_view'])
self._toolbar.addAction(self._actions['orth_view'])
self._toolbar.addAction(self._actions['original_view'])
self._toolbar.addAction(self._actions['cross_hover_view'])
# Add cursor status
self._toolbar.addSeparator()
self._toolbar.addAction(self._actions['hand'])
self._toolbar.addAction(self._actions['cursor'])
self._toolbar.addAction(self._actions['edit'])
# Add undo redo
self._toolbar.addSeparator()
self._toolbar.addAction(self._actions['undo'])
self._toolbar.addAction(self._actions['redo'])
self._toolbar.addSeparator()
self._toolbar.addWidget(self._spinbox)
def _set_scale_factor(self, value):
"""Set scale factor."""
value = float(value) / 100
self.model.set_scale_factor(value, self.image_view.display_type())
def _add_template(self):
"""Open a dialog window and select a template file."""
template_dir = os.path.join(self.label_path, 'standard',
'MNI152_T1_2mm_brain.nii.gz')
template_name = QFileDialog.getOpenFileName(
self,
'Open standard file',
template_dir,
'Nifti files (*.nii.gz *.nii)')
if not template_name.isEmpty():
if sys.platform == 'win32':
template_path = unicode(template_name).encode('gb2312')
else:
template_path = str(template_name)
self._add_img(template_path)
def _add_image(self):
"""Add new item."""
if self._temp_dir == None:
temp_dir = QDir.currentPath()
else:
temp_dir = self._temp_dir
file_name = QFileDialog.getOpenFileName(self,
'Add new file',
temp_dir,
"Nifti files (*.nii *.nii.gz)")
if not file_name.isEmpty():
if sys.platform == 'win32':
file_path = unicode(file_name).encode('gb2312')
else:
file_path = str(file_name)
self._add_img(file_path)
def _duplicate_image(self):
"""Duplicate image."""
index = self.model.currentIndex()
dup_img = self.model._data[index.row()].duplicate()
self.model.insertRow(0, dup_img)
self.list_view.setCurrentIndex(self.model.index(0))
# change button status
self._actions['remove_image'].setEnabled(True)
    def _add_img(self, source, name=None, header=None, view_min=None,
                 view_max=None, alpha=255, colormap='gray'):
        """Load a volume from `source` and register it with the model.

        Parameters
        ----------
        source : str
            Path of the NIfTI file to load (on Windows, gb2312-encoded bytes).
        name : str, optional
            Display name for the volume; the model derives one when None.
        header : optional
            Header passed through to the model -- presumably a NIfTI
            header object; TODO confirm against VolumeListModel.addItem.
        view_min, view_max : optional
            Initial display range; the model picks defaults when None.
        alpha : int
            Layer opacity, 0-255.
        colormap : str
            Colormap name used to render the volume.

        On the first successful load this also builds the whole central
        display (layer list, grid view, toolbar) and wires model signals.
        On failure it offers registration via RegisterVolumeDialog.
        """
        # If the model is gone (e.g. after _close_display), re-create it
        # together with the label config center and painter status.
        if not self.model:
            self._init_label_config_center()
            self.model = VolumeListModel([], self._label_config_center)
            self.model.set_scale_factor(self.default_grid_scale_factor, 'grid')
            self.model.set_scale_factor(self.default_orth_scale_factor, 'orth')
            self.painter_status = PainterStatus(ViewSettings())
        # Remember the directory the file came from, unless it is the
        # bundled `standard` template directory.
        file_path = source
        if sys.platform == 'win32':
            # Windows paths arrive as gb2312-encoded bytes; decode first.
            temp_dir = os.path.dirname(unicode(file_path, 'gb2312'))
            # os.stat comparison stands in for os.path.samefile, which is
            # not usable for this case on Windows Python 2.
            if not os.stat(temp_dir) == os.stat(os.path.join(self.label_path,
                                                             'standard')):
                self._temp_dir = temp_dir
        else:
            temp_dir = os.path.dirname(file_path)
            if not os.path.samefile(temp_dir, os.path.join(self.label_path,
                                                           'standard')):
                self._temp_dir = temp_dir
        if self.model.addItem(file_path, None, name, header, view_min,
                              view_max, alpha, colormap):
            # Take different actions depending on how many volumes exist.
            # First volume: build the entire display from scratch.
            if self.model.rowCount() == 1:
                # initialize views
                self.list_view = LayerView(self._label_config_center)
                self.list_view.setModel(self.model)
                self._init_roi_dialog()
                self.image_view = GridView(self.model, self.painter_status)
                # initialize display layout: layer list beside image view
                central_widget = QWidget()
                layout = QHBoxLayout()
                central_widget.setLayout(layout)
                central_widget.layout().addWidget(self.list_view)
                central_widget.layout().addWidget(self.image_view)
                self.setCentralWidget(central_widget)
                # add a toolbar
                self._add_toolbar()
                #self.setUnifiedTitleAndToolBarOnMac(True)
                # enable the actions that require a loaded volume
                self._actions['save_image'].setEnabled(True)
                self._actions['duplicate_image'].setEnabled(True)
                #self._actions['ld_lbl'].setEnabled(True)
                #self._actions['ld_glbl'].setEnabled(True)
                self._actions['new_image'].setEnabled(True)
                self._actions['close'].setEnabled(True)
                self._actions['orth_view'].setEnabled(True)
                self._actions['cross_hover_view'].setEnabled(True)
                self._actions['original_view'].setEnabled(True)
                self._actions['undo'].setEnabled(False)
                self._actions['redo'].setEnabled(False)
                self._functional_module_set_enabled(True)
                # Atlas lookup only makes sense for MNI-space data.
                if not self.model.is_mni_space():
                    self._actions['atlas'].setEnabled(False)
                # connect signals with slots
                self.list_view.current_changed.connect(self._update_undo)
                self.list_view.current_changed.connect(self._update_redo)
                self.model.rowsInserted.connect(self._update_remove_image)
                self.model.undo_stack_changed.connect(self._update_undo)
                self.model.redo_stack_changed.connect(self._update_redo)
                # set current volume index
                self.list_view.setCurrentIndex(self.model.index(0))
                # set crosshair at the center of the data
                self.model.set_cross_pos([self.model.getY()/2,
                                          self.model.getX()/2,
                                          self.model.getZ()/2])
                ## Enable cursor tracking
                # self.list_view._list_view.selectionModel().currentChanged.connect(
                #        self._switch_cursor_status)
            elif self.model.rowCount() > 1:
                self._actions['remove_image'].setEnabled(True)
                # set current volume index
                self.list_view.setCurrentIndex(self.model.index(0))
            self.is_save_configure = True
        else:
            # addItem failed: data size mismatch -- offer registration.
            ret = QMessageBox.question(self,
                                       'FreeROI',
                                       'Cannot load ' + file_path + ': due to mismatch data size.\nNeed registration?',
                                       QMessageBox.Cancel,
                                       QMessageBox.Yes)
            if ret == QMessageBox.Yes:
                register_volume_dialog = RegisterVolumeDialog(self.model, file_path)
                register_volume_dialog.exec_()
def __new_image(self):
"""Create new image."""
self._new_image()
def _update_remove_image(self):
"""Update the display after removing an image."""
if self.model.rowCount() == 1:
self._actions['remove_image'].setEnabled(False)
else:
self._actions['remove_image'].setEnabled(True)
def _new_image(self, data=None, name=None, colormap=None):
"""Create a new volume for brain parcellation."""
if colormap is None:
colormap = self._label_config_center.get_first_label_config()
self.model.new_image(data, name, None, colormap)
self.list_view.setCurrentIndex(self.model.index(0))
# change button status
self._actions['remove_image'].setEnabled(True)
def new_image_action(self):
"""Change the related status of other actions after creating an image."""
self._actions['remove_image'].setEnabled(True)
def _remove_image(self):
"""Remove current image."""
row = self.list_view.currentRow()
self.model.delItem(row)
if self.model.rowCount() == 1:
self._actions['remove_image'].setEnabled(False)
def _save_image(self):
"""Save image as a nifti file."""
index = self.model.currentIndex()
if not self._temp_dir:
temp_dir = str(QDir.currentPath())
else:
temp_dir = self._temp_dir
file_path = os.path.join(temp_dir,
str(self.model.data(index, Qt.DisplayRole)))
file_types = "Compressed NIFTI file(*.nii.gz);;NIFTI file(*.nii)"
path,filter = QFileDialog.getSaveFileNameAndFilter(
self,
'Save image as...',
file_path,
file_types,)
if filter == 'NIFTI file(*.nii)':
path += '.nii'
else:
path += '.nii.gz'
if not path.isEmpty():
if sys.platform == 'win32':
path = unicode(path).encode('gb2312')
self._temp_dir = os.path.dirname(unicode(path, 'gb2312'))
else:
path = str(path)
self._temp_dir = os.path.dirname(path)
self.model._data[index.row()].save2nifti(path)
def _close_display(self):
"""Close current display."""
self.setCentralWidget(QWidget())
self._set_scale_factor(self.default_grid_scale_factor)
self.removeToolBar(self._toolbar)
self.model = None
self._actions['add_template'].setEnabled(True)
self._actions['add_image'].setEnabled(True)
self._actions['remove_image'].setEnabled(False)
self._actions['new_image'].setEnabled(False)
self._actions['save_image'].setEnabled(False)
self._actions['duplicate_image'].setEnabled(False)
#self._actions['ld_glbl'].setEnabled(False)
#self._actions['ld_lbl'].setEnabled(False)
self._actions['close'].setEnabled(False)
self._actions['grid_view'].setEnabled(False)
self._actions['orth_view'].setEnabled(False)
self._actions['cross_hover_view'].setEnabled(False)
self._actions['original_view'].setEnabled(False)
self._actions['snapshot'].setEnabled(False)
self._functional_module_set_enabled(False)
def _about_freeroi(self):
""" About software."""
QMessageBox.about(self, self.tr("About FreeROI"),
self.tr("<p><b>FreeROI</b> is a versatile image "
"processing software developed for "
"neuroimaging data.</p>"
"<p>Its goal is to provide a user-friendly "
"interface for neuroimaging researchers "
"to visualize and analyze their data, "
"especially in defining region of interest "
"(ROI) for ROI analysis.</p>"
"<p>Version: " + __version__ + "</p>"
"<p>Written by: Lijie Huang, Zetian Yang, "
"Guangfu Zhou, Zhaoguo Liu, Xiaobin Dang, "
"Xiangzhen Kong, Xu Wang, and Zonglei Zhen."
"</p>"
"<p><b>FreeROI</b> is under Revised BSD "
"License.</p>"
"<p>Copyright(c) 2012-2015 "
"Neuroinformatic Team in LiuLab "
"from Beijing Normal University</p>"
"<p></p>"
"<p>Please join and report bugs to:</p>"
"<p><b>nitk-user@googlegroups.com</b></p>"))
def _create_menus(self):
"""Create menus."""
self.file_menu = self.menuBar().addMenu(self.tr("File"))
self.file_menu.addAction(self._actions['add_image'])
self.file_menu.addAction(self._actions['add_template'])
self.file_menu.addSeparator()
self.file_menu.addAction(self._actions['new_image'])
self.file_menu.addAction(self._actions['remove_image'])
self.file_menu.addAction(self._actions['duplicate_image'])
self.file_menu.addAction(self._actions['save_image'])
#self.file_menu.addAction(self._actions['ld_lbl'])
#self.file_menu.addAction(self._actions['ld_glbl'])
self.file_menu.addSeparator()
self.file_menu.addAction(self._actions['close'])
self.file_menu.addAction(self._actions['quit'])
#self.volume_menu = self.menuBar().addMenu(self.tr("Volume"))
#self.volume_menu.addAction(self._actions['new_image'])
#self.volume_menu.addAction(self._actions['remove_image'])
self.view_menu = self.menuBar().addMenu(self.tr("View"))
self.view_menu.addAction(self._actions['grid_view'])
self.view_menu.addAction(self._actions['orth_view'])
self.view_menu.addAction(self._actions['original_view'])
self.view_menu.addAction(self._actions['cross_hover_view'])
self.tool_menu = self.menuBar().addMenu(self.tr("Tools"))
# Basic tools
basic_tools = self.tool_menu.addMenu(self.tr("Basic Tools"))
basic_tools.addAction(self._actions['binarization'])
basic_tools.addAction(self._actions['intersect'])
basic_tools.addAction(self._actions['localmax'])
basic_tools.addAction(self._actions['inverse'])
basic_tools.addAction(self._actions['smoothing'])
basic_tools.addAction(self._actions['meants'])
basic_tools.addAction(self._actions['voxelstats'])
# Segment tools
segment_tools = self.tool_menu.addMenu(self.tr("Segmentation"))
segment_tools.addAction(self._actions['region_grow'])
segment_tools.addAction(self._actions['watershed'])
segment_tools.addAction(self._actions['slic'])
segment_tools.addAction(self._actions['cluster'])
# ROI tools
roi_tools = self.tool_menu.addMenu(self.tr("ROI Tools"))
roi_tools.addAction(self._actions['edge_dete'])
roi_tools.addAction(self._actions['roi_merge'])
roi_tools.addAction(self._actions['regular_roi'])
roi_tools.addAction(self._actions['regular_roi_from_csv'])
roi_tools.addAction(self._actions['r2i'])
# Morphological tools
morphological_tools = self.tool_menu.addMenu(
self.tr("Morphological Processing"))
morphological_tools.addAction(self._actions['opening'])
morphological_tools.addAction(self._actions['binarydilation'])
morphological_tools.addAction(self._actions['binaryerosion'])
morphological_tools.addAction(self._actions['greydilation'])
morphological_tools.addAction(self._actions['greyerosion'])
# label management
self.tool_menu.addAction(self._actions['atlas'])
self.tool_menu.addAction(self._actions['label_management'])
self.tool_menu.addAction(self._actions['snapshot'])
self.help_menu = self.menuBar().addMenu(self.tr("Help"))
self.help_menu.addAction(self._actions['about_freeroi'])
self.help_menu.addAction(self._actions['about_qt'])
def _cursor_enable(self):
"""Cursor enabled."""
if self._actions['cursor'].isChecked():
self._actions['cursor'].setChecked(True)
if isinstance(self.image_view, OrthView):
self._actions['hand'].setChecked(False)
if self.roidialog.isVisible():
self._roidialog_disable()
self.painter_status.set_draw_settings(ViewSettings())
self.image_view.set_cursor(Qt.ArrowCursor)
self.image_view.set_label_mouse_tracking(True)
else:
self._actions['cursor'].setChecked(True)
def _voxel_edit_enable(self):
"""Brush enabled."""
self._label_config_center.set_is_roi_edit(False)
self.painter_status.set_draw_settings(self._label_config_center)
self.image_view.set_cursor(Qt.CrossCursor)
self.image_view.set_label_mouse_tracking(False)
def _roi_edit_enable(self):
"""ROI brush enabled."""
self._label_config_center.set_is_roi_edit(True)
self.painter_status.set_draw_settings(self._label_config_center)
self.image_view.set_cursor(Qt.CrossCursor)
self.image_view.set_label_mouse_tracking(False)
def _roidialog_enable(self):
"""ROI dialog enabled."""
if self._actions['edit'].isChecked():
self._actions['cursor'].setChecked(False)
if isinstance(self.image_view, OrthView):
self._actions['hand'].setChecked(False)
self._actions['edit'].setChecked(True)
self.roidialog._voxel_clicked()
self.roidialog.show()
else:
self._actions['edit'].setChecked(True)
def _atlas_dialog(self):
"""Atlas information dialog."""
if 'atlasdialog' in self.__dict__:
self.atlasdialog.show()
else:
self.atlasdialog = AtlasDialog(self.model, self)
self.atlasdialog.show()
def _roi_batch_enable(self):
"""ROI batch enabled."""
self.image_view.set_label_mouse_tracking(False)
self._label_config_center.set_is_roi_edit(False)
self.painter_status.set_draw_settings(self.roidialog)
def _roidialog_disable(self):
"""Disable the roi dialog."""
self.roidialog.hide()
self._actions['edit'].setChecked(False)
def _hand_enable(self):
"""Hand enabled."""
if self._actions['hand'].isChecked():
self._actions['cursor'].setChecked(False)
self._actions['hand'].setChecked(True)
if hasattr(self, 'roidialog'):
self._roidialog_disable()
self.painter_status.set_draw_settings(MoveSettings())
self.image_view.set_cursor(Qt.OpenHandCursor)
self.image_view.set_label_mouse_tracking(True)
else:
self._actions['hand'].setChecked(True)
def _switch_cursor_status(self):
"""Change the cursor status."""
self._actions['cursor'].setChecked(True)
self._cursor_enable()
def _update_undo(self):
"""Update the undo status."""
if self.model.current_undo_available():
self._actions['undo'].setEnabled(True)
else:
self._actions['undo'].setEnabled(False)
def _update_redo(self):
"""Update the redo status."""
if self.model.current_redo_available():
self._actions['redo'].setEnabled(True)
else:
self._actions['redo'].setEnabled(False)
    def _init_roi_dialog(self):
        """Create the ROI dialog and wire its edit-mode signals."""
        # Label management stays disabled until a display enables it again.
        self._actions['label_management'].setEnabled(False)
        self.roidialog = ROIDialog(self.model, self._label_config_center, self)
        # Switch painter mode according to which edit tool the dialog selects.
        self.roidialog.voxel_edit_enabled.connect(self._voxel_edit_enable)
        self.roidialog.roi_edit_enabled.connect(self._roi_edit_enable)
        self.roidialog.roi_batch_enabled.connect(self._roi_batch_enable)
        # Changing the selected volume invalidates any picked ROIs.
        self.list_view._list_view.selectionModel().currentChanged.connect(
            self.roidialog.clear_rois)
def _init_label_config_center(self):
"""Initialize LabelConfigCenter."""
lbl_path = os.path.join(self.label_config_dir,
'*.' + self.label_config_suffix)
label_configs = glob.glob(lbl_path)
self.label_configs = map(LabelConfig, label_configs)
self._list_view_model = QStandardItemModel()
# _list_view_model.appendRow(QStandardItem("None"))
for x in self.label_configs:
self._list_view_model.appendRow(QStandardItem(x.get_name()))
self._label_models = []
for item in self.label_configs:
model = QStandardItemModel()
indexs = sorted(item.get_index_list())
for index in indexs:
text_index_icon_item = QStandardItem(gen_label_color(item.get_label_color(item.get_index_label(index))),
str(index) + ' ' + item.get_index_label(index))
model.appendRow(text_index_icon_item)
self._label_models.append(model)
self._label_config_center = LabelConfigCenter(self.label_configs, self._list_view_model, self._label_models)
def _get_label_config(self, file_path):
"""Get label config file."""
# Get label config file
dir = os.path.dirname(file_path)
file = os.path.basename(file_path)
split_list = file.split('.')
nii_index = split_list.index('nii')
file = ''.join(split_list[:nii_index])
config_file = os.path.join(file, 'lbl')
if os.path.isfile(config_file):
label_config = LabelConfig(config_file, False)
else:
label_config = self.label_config
return label_config
def _undo(self):
"""The undo action."""
self.model.undo_current_image()
def _redo(self):
"""The redo action."""
self.model.redo_current_image()
def _regular_roi(self):
"""Generate regular(cube, sphere, etc.) roi dialog."""
regular_roi_dialog = RegularROIDialog(self.model)
regular_roi_dialog.exec_()
def _regular_roi_from_csv_file(self):
"""Generate regular(cube, sphere, etc.) roi from csv file."""
regular_roi_from_csv_file = RegularROIFromCSVFileDialog(self.model)
regular_roi_from_csv_file.exec_()
def _edge_detection(self):
"""Detect the image edge."""
edge_detection(self.model)
def _roi_merge(self):
"""ROI merge dialog."""
new_dialog = ROIMergeDialog(self.model)
new_dialog.exec_()
def _r2i(self):
"""ROI to gwmi dialog."""
new_dialog = Roi2gwmiDialog(self.model)
new_dialog.exec_()
def _opening(self):
"""Opening Dialog which using the opening algorithm to process the image."""
new_dialog = OpenDialog(self.model)
new_dialog.exec_()
def _voxelstats(self):
"""Voxel statistical analysis dialog."""
new_dialog = VoxelStatsDialog(self.model, self)
new_dialog.show()
def _label_manage(self):
"""Label management dialog."""
self.label_manage_dialog = LabelManageDialog(self.label_configs,
self._list_view_model,
self._label_models,
self.label_config_dir,
self.label_config_suffix,
self)
self.label_manage_dialog.exec_()
def _ld_lbl(self):
"""Local label config file."""
file_name = QFileDialog.getOpenFileName(self,
'Load Label File',
QDir.currentPath(),
"Label files (*.lbl)")
if file_name:
label_config = LabelConfig(str(file_name), False)
self.model.set_cur_label(label_config)
def _ld_glbl(self):
"""Local global label config file."""
file_name = QFileDialog.getOpenFileName(self,
'Load Label File',
QDir.currentPath(),
"Label files (*.lbl)")
if file_name:
label_config = LabelConfig(str(file_name), True)
self.model.set_global_label(label_config)
    def _grid_view(self):
        """Switch the central display to lightbox (grid) view."""
        # Toggle availability: grid is now active, orth becomes selectable;
        # hand/snapshot only make sense in orth view.
        self._actions['grid_view'].setEnabled(False)
        self._actions['orth_view'].setEnabled(True)
        self._actions['hand'].setEnabled(False)
        self._actions['snapshot'].setEnabled(False)
        # Force the cursor tool so no stale edit mode survives the switch.
        self._actions['cursor'].trigger()
        self.centralWidget().layout().removeWidget(self.image_view)
        self.image_view.set_display_type('grid')
        # Disconnect model signals from the old view before deleting it,
        # otherwise the soon-to-be-deleted widget would still get updates.
        self.model.scale_changed.disconnect()
        self.model.repaint_slices.disconnect()
        self.model.cross_pos_changed.disconnect(self.image_view.update_cross_pos)
        self.image_view.deleteLater()
        # Reflect the grid scale factor (fraction -> percent) in the spinbox.
        self._spinbox.setValue(100 * self.model.get_scale_factor('grid'))
        # Recreate the grid view, restoring its previous scrollbar position.
        self.image_view = GridView(self.model, self.painter_status,
                                   self._gridview_vertical_scrollbar_position)
        self.centralWidget().layout().addWidget(self.image_view)
    def _orth_view(self):
        """Switch the central display to orthographic view."""
        # Toggle availability: orth is now active, grid becomes selectable;
        # hand/snapshot are only usable in orth view.
        self._actions['orth_view'].setEnabled(False)
        self._actions['grid_view'].setEnabled(True)
        self._actions['snapshot'].setEnabled(True)
        self._actions['hand'].setEnabled(True)
        # Force the cursor tool so no stale edit mode survives the switch.
        self._actions['cursor'].trigger()
        # Remember the grid scroll position so _grid_view can restore it.
        self._gridview_vertical_scrollbar_position = \
            self.image_view.get_vertical_srollbar_position()
        self.centralWidget().layout().removeWidget(self.image_view)
        self.image_view.set_display_type('orth')
        # Disconnect model signals from the old view before deleting it,
        # otherwise the soon-to-be-deleted widget would still get updates.
        self.model.scale_changed.disconnect()
        self.model.repaint_slices.disconnect()
        self.model.cross_pos_changed.disconnect(self.image_view.update_cross_pos)
        self.image_view.deleteLater()
        # Reflect the orth scale factor (fraction -> percent) in the spinbox.
        self._spinbox.setValue(100 * self.model.get_scale_factor('orth'))
        self.image_view = OrthView(self.model, self.painter_status)
        self.centralWidget().layout().addWidget(self.image_view)
def _display_cross_hover(self):
"""Display the cross hover on the image."""
if self.model._display_cross:
self.model.set_cross_status(False)
self._actions['cross_hover_view'].setText('Enable cross hover')
self._actions['cross_hover_view'].setIcon(QIcon(os.path.join(self._icon_dir,'cross_hover_disable.png')))
else:
self.model.set_cross_status(True)
self._actions['cross_hover_view'].setText('Disable cross hover')
self._actions['cross_hover_view'].setIcon(QIcon(os.path.join(self._icon_dir,'cross_hover_enable.png')))
def _reset_view(self):
"""Reset view parameters."""
if self.image_view.display_type() == 'orth':
if not self.model.get_scale_factor('orth') == \
self.default_orth_scale_factor:
self._spinbox.setValue(100 * self.default_orth_scale_factor)
self.image_view.reset_view()
elif self.image_view.display_type() == 'grid':
if not self.model.get_scale_factor('grid') == \
self.default_grid_scale_factor:
self._spinbox.setValue(100 * self.default_grid_scale_factor)
def _binarization(self):
"""Image binarization dialog."""
binarization_dialog = BinarizationDialog(self.model)
binarization_dialog.exec_()
def _binaryerosion(self):
"""Image binaryerosion dialog."""
binaryerosion_dialog = BinaryerosionDialog(self.model)
binaryerosion_dialog.exec_()
def _binarydilation(self):
"""Image binarydilation dialog."""
binarydilation_dialog = BinarydilationDialog(self.model)
binarydilation_dialog.exec_()
def _greyerosion(self):
"""Image greyerosion dialog."""
greyerosiondialog = GreyerosionDialog(self.model)
greyerosiondialog.exec_()
def _greydilation(self):
"""Image greydilation dialog."""
greydilation_dialog = GreydilationDialog(self.model)
greydilation_dialog.exec_()
def _intersect(self):
"""Image intersect dialog."""
intersect_dialog = IntersectDialog(self.model)
intersect_dialog.exec_()
def _meants(self):
"""Image meants dialog."""
new_dialog = MeanTSDialog(self.model)
new_dialog.exec_()
def _local_max(self):
"""Compute image local max value dialog."""
new_dialog = LocalMaxDialog(self.model, self)
new_dialog.exec_()
def _inverse(self):
"""Inverse the given image."""
inverse_image(self.model)
def _smooth(self):
"""Image smooth dialog."""
new_dialog = SmoothingDialog(self.model)
new_dialog.exec_()
def _region_grow(self):
"""Image region grow dialog."""
new_dialog = GrowDialog(self.model, self)
new_dialog.exec_()
def _watershed(self):
"""Image watershed dialog."""
new_dialog = WatershedDialog(self.model, self)
new_dialog.exec_()
def _slic(self):
"""Image supervoxel segmentation dialog."""
new_dialog = SLICDialog(self.model, self)
new_dialog.exec_()
def _cluster(self):
"""Image cluster dialog."""
new_dialog = ClusterDialog(self.model, self)
new_dialog.exec_()
def _functional_module_set_enabled(self, status):
"""Enable the actions."""
self._actions['binarization'].setEnabled(status)
self._actions['intersect'].setEnabled(status)
self._actions['meants'].setEnabled(status)
self._actions['voxelstats'].setEnabled(status)
self._actions['localmax'].setEnabled(status)
self._actions['inverse'].setEnabled(status)
self._actions['smoothing'].setEnabled(status)
self._actions['atlas'].setEnabled(status)
self._actions['region_grow'].setEnabled(status)
self._actions['watershed'].setEnabled(status)
self._actions['slic'].setEnabled(status)
self._actions['cluster'].setEnabled(status)
self._actions['opening'].setEnabled(status)
self._actions['binarydilation'].setEnabled(status)
self._actions['binaryerosion'].setEnabled(status)
self._actions['greydilation'].setEnabled(status)
self._actions['greyerosion'].setEnabled(status)
self._actions['regular_roi'].setEnabled(status)
self._actions['regular_roi_from_csv'].setEnabled(status)
self._actions['label_management'].setEnabled(status)
self._actions['r2i'].setEnabled(status)
self._actions['edge_dete'].setEnabled(status)
self._actions['roi_merge'].setEnabled(status)
    def _snapshot(self):
        """Capture images from OrthView.

        The view's ``save_image`` does the actual capture and saving.
        """
        self.image_view.save_image()
|
|
#!/usr/bin/env python
#
# Camlistore blob server for App Engine.
#
# Derived from Brad's Brackup-gae utility:
# http://github.com/bradfitz/brackup-gae-server
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Upload server for camlistore.
To test:
# Stat -- 200 response
curl -v \
-d camliversion=1 \
http://localhost:8080/camli/stat
# Upload -- 200 response
curl -v -L \
-F sha1-126249fd8c18cbb5312a5705746a2af87fba9538=@./test_data.txt \
<the url returned by stat>
# Put with bad blob_ref parameter -- 400 response
curl -v -L \
-F sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f=@./test_data.txt \
<the url returned by stat>
# Get present -- the blob
curl -v http://localhost:8080/camli/\
sha1-126249fd8c18cbb5312a5705746a2af87fba9538
# Get missing -- 404
curl -v http://localhost:8080/camli/\
sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f
# Check present -- 200 with only headers
curl -I http://localhost:8080/camli/\
sha1-126249fd8c18cbb5312a5705746a2af87fba9538
# Check missing -- 404 with empty list response
curl -I http://localhost:8080/camli/\
sha1-22a7fdd575f4c3e7caa3a55cc83db8b8a6714f0f
# List -- 200 with list of blobs (just one)
curl -v http://localhost:8080/camli/enumerate-blobs&limit=1
# List offset -- 200 with list of no blobs
curl -v http://localhost:8080/camli/enumerate-blobs?after=\
sha1-126249fd8c18cbb5312a5705746a2af87fba9538
"""
import cgi
import hashlib
import logging
import urllib
import wsgiref.handlers
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
import config
class Blob(db.Model):
    """A content-addressable blob stored via the App Engine blobstore.

    The datastore key name is the blob ref: the algorithm, dash, and the
    lowercase hex digest:
    "sha1-f1d2d2f924e986ac86fdf7b36c94bcdf32beec15"
    """
    # Reference to the actual bytes (not indexed).
    blob = blobstore.BlobReferenceProperty(indexed=False)
    # Size. (already in the blobinfo, but denormalized for speed)
    size = db.IntegerProperty(indexed=False)
class HelloHandler(webapp.RequestHandler):
    """Present ourselves to the world: minimal landing page at '/'."""
    def get(self):
        self.response.out.write('Hello! This is an AppEngine Camlistore '
                                'blob server.<p>')
        self.response.out.write('<a href=js/index.html>js frontend</a>')
class ListHandler(webapp.RequestHandler):
    """Return chunks that the server has (serves /camli/enumerate-blobs)."""
    def get(self):
        # Optional cursor: enumerate keys strictly after this blob ref.
        after_blob_ref = self.request.get('after')
        # Clamp the requested page size into [1, 1000].
        limit = max(1, min(1000, int(self.request.get('limit') or 1000)))
        query = Blob.all().order('__key__')
        if after_blob_ref:
            query.filter('__key__ >', db.Key.from_path(Blob.kind(), after_blob_ref))
        blob_ref_list = query.fetch(limit)
        self.response.headers['Content-Type'] = 'text/javascript'
        # NOTE: the JSON below is assembled by hand; the embedded whitespace
        # is part of the emitted wire format, so keep it byte-exact.
        out = [
            '{\n'
            ' "blobs": ['
        ]
        if blob_ref_list:
            out.extend([
                '\n ',
                ',\n '.join(
                    '{"blobRef": "%s", "size": %d}' %
                    (b.key().name(), b.size) for b in blob_ref_list),
                '\n ',
            ])
        # A full page implies there may be more: emit a continuation cursor.
        if blob_ref_list and len(blob_ref_list) == limit:
            out.append(
                '],'
                '\n "continueAfter": "%s"\n'
                '}' % blob_ref_list[-1].key().name())
        else:
            out.append(
                ']\n'
                '}'
            )
        self.response.out.write(''.join(out))
class GetHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Gets a blob with the given ref (serves /camli/<blob-ref>)."""
    def head(self, blob_ref):
        # HEAD delegates to GET; the framework presumably suppresses the
        # body for HEAD requests — confirm against webapp behavior.
        self.get(blob_ref)
    def get(self, blob_ref):
        blob = Blob.get_by_key_name(blob_ref)
        if not blob:
            self.error(404)
            return
        # Blobs are opaque bytes, so always serve as octet-stream.
        self.send_blob(blob.blob, 'application/octet-stream')
class StatHandler(webapp.RequestHandler):
    """Handler to return a URL for a script to get an upload URL."""
    def stat_key(self):
        # JSON key under which the already-present blobs are reported;
        # subclasses override this (see PostUploadHandler).
        return "stat"
    def get(self):
        self.handle()
    def post(self):
        self.handle()
    def handle(self):
        """Shared GET/POST implementation of the camli stat protocol."""
        # Only protocol version 1 is supported.
        if self.request.get('camliversion') != '1':
            self.response.headers['Content-Type'] = 'text/plain'
            self.response.out.write('Bad parameter: "camliversion"')
            self.response.set_status(400)
            return
        # Collect blobN=<blob-ref> parameters; the N suffix must parse as
        # an integer or the request is rejected.
        blob_ref_list = []
        for key, value in self.request.params.items():
            if not key.startswith('blob'):
                continue
            try:
                int(key[len('blob'):])
            except ValueError:
                logging.exception('Bad parameter: %s', key)
                self.response.headers['Content-Type'] = 'text/plain'
                self.response.out.write('Bad parameter: "%s"' % key)
                self.response.set_status(400)
                return
            else:
                blob_ref_list.append(value)
        key_name = self.stat_key()
        self.response.headers['Content-Type'] = 'text/javascript'
        # Hand-assembled JSON; whitespace is part of the wire format.
        out = [
            '{\n'
            ' "maxUploadSize": %d,\n'
            ' "uploadUrl": "%s",\n'
            ' "uploadUrlExpirationSeconds": 600,\n'
            ' "%s": [\n'
            % (config.MAX_UPLOAD_SIZE,
               blobstore.create_upload_url('/upload_complete'),
               key_name)
        ]
        # Batch-fetch the claimed refs; db.get yields None for missing
        # keys, which are filtered out when formatting below.
        already_have = db.get([
            db.Key.from_path(Blob.kind(), b) for b in blob_ref_list])
        if already_have:
            out.extend([
                '\n ',
                ',\n '.join(
                    '{"blobRef": "%s", "size": %d}' %
                    (b.key().name(), b.size) for b in already_have if b is not None),
                '\n ',
            ])
        out.append(
            ']\n'
            '}'
        )
        self.response.out.write(''.join(out))
class PostUploadHandler(StatHandler):
    """Stat-style response used after an upload completes.

    Identical to StatHandler except the blob list is reported under the
    "received" key.
    """
    def stat_key(self):
        return "received"
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    """Handle blobstore post, as forwarded by notification agent."""
    def compute_blob_ref(self, hash_func, blob_key):
        """Computes the blob ref for a blob stored using the given hash function.
        Args:
          hash_func: The name of the hash function (sha1, md5)
          blob_key: The BlobKey of the App Engine blob containing the blob's data.
        Returns:
          A newly computed blob_ref for the data.
        """
        hasher = hashlib.new(hash_func)
        last_index = 0
        # Stream the blob through the hasher in MAX_BLOB_FETCH_SIZE chunks.
        while True:
            data = blobstore.fetch_data(
                blob_key, last_index, last_index + blobstore.MAX_BLOB_FETCH_SIZE - 1)
            if not data:
                break
            hasher.update(data)
            last_index += len(data)
        return '%s-%s' % (hash_func, hasher.hexdigest())
    def store_blob(self, blob_ref, blob_info, error_messages):
        """Store blob information.
        Writes a Blob to the datastore for the uploaded file.
        Args:
          blob_ref: The client-claimed blob ref ("sha1-" + 40 hex digits).
          blob_info: BlobInfo record representing the upload.
          error_messages: Empty list for storing error messages to report to user.
        """
        if not blob_ref.startswith('sha1-'):
            error_messages.append('Only sha1 supported for now.')
            return
        if len(blob_ref) != (len('sha1-') + 40):
            error_messages.append('Bogus blobRef.')
            return
        # Verify the claimed ref against the actual uploaded bytes.
        found_blob_ref = self.compute_blob_ref('sha1', blob_info.key())
        if blob_ref != found_blob_ref:
            error_messages.append('Found blob ref %s, expected %s' %
                                  (found_blob_ref, blob_ref))
            return
        def txn():
            logging.info('Saving blob "%s" with size %d', blob_ref, blob_info.size)
            blob = Blob(key_name=blob_ref, blob=blob_info.key(), size=blob_info.size)
            blob.put()
        db.run_in_transaction(txn)
    def post(self):
        """Do upload post."""
        error_messages = []
        blob_info_dict = {}
        for key, value in self.request.params.items():
            if isinstance(value, cgi.FieldStorage):
                if 'blob-key' in value.type_options:
                    blob_info = blobstore.parse_blob_info(value)
                    blob_info_dict[value.name] = blob_info
                    logging.info("got blob: %s" % value.name)
                    self.store_blob(value.name, blob_info, error_messages)
        if error_messages:
            logging.error('Upload errors: %r', error_messages)
            # Roll back: drop every uploaded blob before reporting failure.
            blobstore.delete(blob_info_dict.values())
            self.response.set_status(303)
            # TODO: fix up this format
            self.response.headers.add_header("Location", '/error?%s' % '&'.join(
                'error_message=%s' % urllib.quote(m) for m in error_messages))
        else:
            # Redirect to the stat-style handler listing what was received.
            # NOTE(review): iterkeys() is Python 2 only.
            query = ['/nonstandard/upload_complete?camliversion=1']
            query.extend('blob%d=%s' % (i + 1, k)
                         for i, k in enumerate(blob_info_dict.iterkeys()))
            self.response.set_status(303)
            self.response.headers.add_header("Location", str('&'.join(query)))
class ErrorHandler(webapp.RequestHandler):
    """The blob put failed; report the accumulated error messages."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        # One message per line, from the repeated error_message query params.
        self.response.out.write('\n'.join(self.request.get_all('error_message')))
        self.response.set_status(400)
class DebugUploadForm(webapp.RequestHandler):
    """Serve a bare HTML form for manually exercising the upload path."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        uploadurl = blobstore.create_upload_url('/upload_complete')
        self.response.out.write('<body><form method="post" enctype="multipart/form-data" action="%s">' % uploadurl)
        self.response.out.write('<input type="file" name="sha1-f628050e63819347a095645ad9ae697415664f0">')
        self.response.out.write('<input type="submit"></form></body>')
# URL routing table for the WSGI application.
APP = webapp.WSGIApplication(
    [
        ('/', HelloHandler),
        ('/debug/upform', DebugUploadForm),
        ('/camli/enumerate-blobs', ListHandler),
        ('/camli/stat', StatHandler),
        ('/camli/([^/]+)', GetHandler),
        ('/nonstandard/upload_complete', PostUploadHandler),
        ('/upload_complete', UploadHandler),  # Admin only.
        ('/error', ErrorHandler),
    ],
    debug=True)
def main():
    """Entry point: serve APP through the CGI adapter (App Engine style)."""
    wsgiref.handlers.CGIHandler().run(APP)
if __name__ == '__main__':
    main()
|
|
#Generic Geocoding Script
#Requires the GeoPy library and Python 2.7 to run.
#To test without having geopy installed comment out 'import geopy', 'from geopy.geocoders import Nominatim', and 'geolocator = Nominatim()' and all geocoding will fail.
#For editing csv files
import csv
import shutil
import geopy
#For geocoding
from geopy.geocoders import Nominatim
# Nominatim (OpenStreetMap) geocoder instance used for all lookups.
geolocator = Nominatim()
# Defaults for hard-coding inputs. To skip the interactive prompts, set these
# and comment out the user-input processing calls further down.
inputFile = ""
inputHeaders = []
outputFile = ""
#Taking user input for seed file location.
def fileLocation():
inputFile = str(raw_input("What is the location of your .csv formatted addresses?\nIt should be formated like this:\nC:\Temp\AuburnData_Clean.csv\n"))
print "Is this the correct file location? " + str(inputFile)
if userCheck() == False:
fileLocation()
else:
return inputFile
#Taking user input for formating of column headers.
def headersFormat():
inputHeaders = str(raw_input("What are the column headers in your .csv file?\nPlease enter you response in the following format:\nAddress, ... Latitude, Longitude\n"))
inputHeaders = inputHeaders.split(",")
print "Are these the correct column headers? " + str(inputHeaders).strip('[]')
if userCheck() == False:
headersFormat()
else:
return inputHeaders
#Taking user input for the location of the addresses in the CSV.
def addressColumn():
inputColumn = int(raw_input("Which column are the addresses located in your .csv file?\nPlease enter you response as an integer.\n"))
print "Is this the correct column number for the location of the addresses? " + str(inputColumn)
if userCheck() == False:
addressColumn()
else:
return inputColumn
#Taking user input for output file name and location.
def outputLocation():
outputFile = str(raw_input("Where do you want your output file to be placed and what do you want it named?\nPlease format your response like this:\nC:\Temp\AuburnData_Clean_geocoded.csv\n"))
print "Is this the correct file location? " + str(outputFile)
if userCheck() == False:
outputLocation()
else:
return outputFile
#Having users verify their inputs.
def userCheck():
verifyFile = str(raw_input("Yes or No? "))
valid = ['Yes', 'yes', 'y', 'Y', 'True', 'true', 'yse', 'Yse', 'YES']
if verifyFile in valid:
print "Information verified by user."
else:
return False
#For attempting to fix addresses that failed to geocode.

# Street-type canonicalisation table: (canonical form, accepted variants).
_STREET_TYPES = (
    ('ST', ('ST', 'STR', 'STRE', 'STREE', 'STRT', 'STREET')),
    ('WAY', ('WAY', 'WY')),
    ('AVE', ('AVE', 'AV', 'AVEN', 'AVENU', 'AVENUE', 'AVN', 'AVNUE')),
    ('PL', ('PL', 'PLACE')),
    ('RD', ('RD', 'ROAD', 'RAD', 'ROD', 'RAOD')),
    ('BOULEVARD', ('BLVD', 'BOULEVARD', 'BOUL', 'BOULV')),
    ('DR', ('DRIVE', 'DR')),
    ('HWY', ('HWY', 'HIGHWAY', 'HWAY')),
)

def _ordinal_suffix(token):
    """Ordinal suffix for a numeric token: 1->ST, 2->ND, 3->RD, else TH."""
    last = token[-1]
    if last == '1':
        return 'ST'
    if last == '2':
        return 'ND'
    if last == '3':
        return 'RD'
    return 'TH'

def fixIt(address):
    """Try to clean up an address that failed to geocode.

    Uppercases, strips special characters, appends an ordinal suffix to a
    bare numeric street name, and canonicalises street-type words. The
    cleaned string is stored in the module-level ``fixedAddress`` (which the
    caller then geocodes). Returns True when the cleaned string differs
    from the input, False otherwise.
    """
    # BUG FIX: the cleaned address was previously a local variable, so the
    # caller's geocode retry always saw the stale module-level value.
    global fixedAddress
    # Blank input cannot be fixed.
    if address == '':
        return False
    cleaned = address.strip().upper()
    print(cleaned)
    # Remove special characters that do not occur in US addresses.
    # "-" is deliberately kept (e.g. hyphenated street names).
    for ch in "~`!@#$%^&*()_+=[]{}\\|:;',.<>?/":
        cleaned = cleaned.replace(ch, "")
    print(cleaned)
    words = [w.strip() for w in cleaned.split(' ')]
    if len(words) > 2:
        # Append an ordinal suffix to a bare numeric street name,
        # e.g. "123 5 ST" -> "123 5TH ST".
        # BUG FIX: the original compared type(...) against the *string*
        # 'int' (never true) and compared characters against integers; the
        # elif branch also modified index 1 instead of index 2.
        if words[1].isdigit():
            words[1] = words[1] + _ordinal_suffix(words[1])
        elif words[2].isdigit():
            words[2] = words[2] + _ordinal_suffix(words[2])
    # Canonicalise common street-type spellings and misspellings.
    # BUG FIX: the original `word == "ST" or "STR" or ...` tests were
    # always true, and assigning to the loop variable never wrote the
    # result back into the list.
    for i, w in enumerate(words):
        for canonical, variants in _STREET_TYPES:
            if w in variants:
                words[i] = canonical
                break
    fixedAddress = ' '.join(words)
    print(fixedAddress)
    # BUG FIX: the original used `isAddressFixed == True` (a no-op
    # comparison instead of an assignment), so it always returned False.
    return fixedAddress != address
#User input processing calls. Comment out these method calls to skip user input.
inputFile = fileLocation()
inputHeaders = headersFormat()
# NOTE: this rebinding shadows the addressColumn() function from here on;
# the integer column index is what the later code uses.
addressColumn = addressColumn()
outputFile = outputLocation()
fixedAddress = ''
#For trouble shooting.
print "\ninputFile = " + str(inputFile) + "\ninputHeaders = " + str(inputHeaders) + "\noutputFile = " + str(outputFile) + "\n\n*****Begin Processing*****\n"
#Where the geocoding is done.
def actualGeocoding():
    """Geocode every address in inputFile and write lat/lon to outputFile."""
    #Blank list for holding addresses.
    addresses = []
    #Filling list with addresses from input file.
    with open (inputFile) as csvFile:
        csv_data = csv.reader(csvFile)
        for row in csv_data:
            addresses.append(row[addressColumn])
    # Report the workload; the time estimates assume roughly one address
    # per second — TODO confirm against Nominatim rate limits.
    totalAddresses = str(len(addresses))
    print "Processing " + totalAddresses + " rows in file.\n" + str(float(totalAddresses)/float(60)/float(60)) + " hours remaining before processing is complete.\n" + str(float(totalAddresses)/float(60)) + " minutes remaining before processing is complete.\n"
    rowsPassed = 0
    rowsSkipped = 0
    rowsFailed = 0
    #Variables for organizing geocoding results.
    latitude = []
    longitude = []
    a = 1
    # Geocode each address. The bare except is deliberate best-effort
    # handling: any geocoder failure falls through to the fix-up path.
    oldValue = ""
    for value in addresses:
        try:
            # Consecutive duplicate addresses reuse the previous result.
            if value == oldValue:
                latitude.append((location.latitude))
                longitude.append((location.longitude))
                print "Current row in input file SKIPPED: " + str(a) + " Processed row " + str(a) + " of " + totalAddresses + "."
                rowsSkipped += 1
                a+=1
            else:
                location = geolocator.geocode([value])
                latitude.append((location.latitude))
                longitude.append((location.longitude))
                print "Current row in input file PASSED: " + str(a) + " Processed row " + str(a) + " of " + totalAddresses + "."
                oldValue = value
                rowsPassed += 1
                a+=1
        except:
            # Retry with the cleaned-up module-level fixedAddress.
            if fixIt(value):
                location = geolocator.geocode([fixedAddress])
                latitude.append((location.latitude))
                longitude.append((location.longitude))
                print "Current row in input file PASSED: " + str(a) + " Processed row " + str(a) + " of " + totalAddresses + "."
                oldValue = value
                rowsPassed += 1
                a+=1
            else:
                # Unfixable: record blanks so rows stay aligned.
                latitude.append((" "))
                longitude.append((" "))
                print "Current row in input file FAILED: " + str(a) + " Processed row " + str(a) + " of " + totalAddresses + "."
                rowsFailed += 1
                a+=1
    #Open the original csv and grab all the data, place it in a var called data, and close the file again.
    f = open(inputFile)
    data = [item for item in csv.reader(f)]
    f.close()
    #Create a blank array called new_data
    new_data = []
    #For each item in data append a location, then add the complete item to the new data variable
    for i, item in enumerate(data):
        try:
            item.append(latitude[i])
            item.append(longitude[i])
            new_data.append(item)
        except:
            # Missing result for this row: append a single blank cell.
            item.append(" ")
            new_data.append(item)
    #Open the new csv and write the header row followed by a row for each object in the new_data array
    f = open(outputFile, 'w')
    csv.writer(f, lineterminator='\n').writerow(inputHeaders)
    csv.writer(f, lineterminator='\n').writerows(new_data)
    f.close()
    #End processing message.
    print "\n*****Processing Complete*****\n\n" + str(rowsPassed) + " out of " + totalAddresses + " rows were successfully geocoded.\n" + str(rowsSkipped) + " out of " + totalAddresses + " were duplicates and geocoded successfully.\n" + str(rowsFailed) + " out of " + totalAddresses + " rows failed to geocode successfully.\n" + str(100 * (float(rowsPassed)+float(rowsSkipped))/float(totalAddresses)) + "% of total addresses successfully geocoded."
#Geoprocessing call.
actualGeocoding()
|
|
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from itertools import groupby, chain
from future.moves.itertools import zip_longest
from devlib.utils.types import identifier
from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration,
JobGenerator, settings)
from wa.framework.configuration.parsers import ConfigParser
from wa.framework.configuration.plugin_cache import PluginCache
from wa.framework.exception import NotFoundError, ConfigError
from wa.framework.job import Job
from wa.utils import log
from wa.utils.serializer import Podable
class CombinedConfig(Podable):
    """Serializable pairing of the meta settings with a run configuration."""
    _pod_serialization_version = 1

    @staticmethod
    def from_pod(pod):
        """Deserialize a CombinedConfig from its plain-old-data form."""
        config = super(CombinedConfig, CombinedConfig).from_pod(pod)
        config.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
        config.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
        return config

    def __init__(self, settings=None, run_config=None):  # pylint: disable=redefined-outer-name
        super(CombinedConfig, self).__init__()
        self.settings = settings
        self.run_config = run_config

    def to_pod(self):
        """Serialize to plain-old-data, embedding both sub-configurations."""
        pod = super(CombinedConfig, self).to_pod()
        pod.update({
            'settings': self.settings.to_pod(),
            'run_config': self.run_config.to_pod(),
        })
        return pod

    @staticmethod
    def _pod_upgrade_v1(pod):
        # Baseline schema: just stamp the version if it is missing.
        pod['_pod_version'] = pod.get('_pod_version', 1)
        return pod
class ConfigManager(object):
    """
    Represents run-time state of WA. Mostly used as a container for loaded
    configuration and discovered plugins.
    This exists outside of any command or run and is associated with the running
    instance of WA itself.
    """
    @property
    def enabled_instruments(self):
        # Delegates to the job configuration, which tracks enable/disable.
        return self.jobs_config.enabled_instruments
    @property
    def enabled_processors(self):
        return self.jobs_config.enabled_processors
    @property
    def job_specs(self):
        # Guard: specs only exist after generate_jobs() has run.
        if not self._jobs_generated:
            msg = 'Attempting to access job specs before '\
                  'jobs have been generated'
            raise RuntimeError(msg)
        return [j.spec for j in self._jobs]
    @property
    def jobs(self):
        if not self._jobs_generated:
            msg = 'Attempting to access jobs before '\
                  'they have been generated'
            raise RuntimeError(msg)
        return self._jobs
    def __init__(self, settings=settings):  # pylint: disable=redefined-outer-name
        self.settings = settings
        self.run_config = RunConfiguration()
        self.plugin_cache = PluginCache()
        self.jobs_config = JobGenerator(self.plugin_cache)
        self.loaded_config_sources = []
        self._config_parser = ConfigParser()
        self._jobs = []
        self._jobs_generated = False
        self.agenda = None
    def load_config_file(self, filepath):
        """Parse a config file, recording it and any includes as loaded."""
        includes = self._config_parser.load_from_path(self, filepath)
        self.loaded_config_sources.append(filepath)
        self.loaded_config_sources.extend(includes)
    def load_config(self, values, source):
        """Load already-parsed config values, tagged with their source."""
        self._config_parser.load(self, values, source)
        self.loaded_config_sources.append(source)
    def get_plugin(self, name=None, kind=None, *args, **kwargs):
        # Plugin names are normalised via identifier() before lookup.
        return self.plugin_cache.get_plugin(identifier(name), kind, *args, **kwargs)
    def get_instruments(self, target):
        """Instantiate every enabled instrument against the given target."""
        instruments = []
        for name in self.enabled_instruments:
            try:
                instruments.append(self.get_plugin(name, kind='instrument',
                                                   target=target))
            except NotFoundError:
                # Re-raise with a friendlier, instrument-specific message.
                msg = 'Instrument "{}" not found'
                raise NotFoundError(msg.format(name))
        return instruments
    def get_processors(self):
        """Instantiate every enabled output processor."""
        processors = []
        for name in self.enabled_processors:
            try:
                proc = self.plugin_cache.get_plugin(name, kind='output_processor')
            except NotFoundError:
                msg = 'Output Processor "{}" not found'
                raise NotFoundError(msg.format(name))
            processors.append(proc)
        return processors
    def get_config(self):
        return CombinedConfig(self.settings, self.run_config)
    def finalize(self):
        """Finalize the run config once an agenda is set; returns the combined config."""
        if not self.agenda:
            msg = 'Attempting to finalize config before agenda has been set'
            raise RuntimeError(msg)
        self.run_config.merge_device_config(self.plugin_cache)
        return self.get_config()
    def generate_jobs(self, context):
        """Expand job specs into Job objects in the configured execution order."""
        job_specs = self.jobs_config.generate_job_specs(context.tm)
        if not job_specs:
            msg = 'No jobs available for running.'
            raise ConfigError(msg)
        exec_order = self.run_config.execution_order
        log.indent()
        for spec, i in permute_iterations(job_specs, exec_order):
            job = Job(spec, i, context)
            job.load(context.tm.target)
            self._jobs.append(job)
            context.run_state.add_job(job)
        log.dedent()
        self._jobs_generated = True
def permute_by_workload(specs):
    """
    The "classic" ordering: every iteration of a workload spec is executed
    before moving on to the next spec.
    """
    for current_spec in specs:
        for iteration in range(1, current_spec.iterations + 1):
            yield (current_spec, iteration)
def permute_by_iteration(specs):
    """
    Interleave iterations across workloads: the first iteration of every
    spec runs before any second iteration (A1, B1, C1, A2, B2, C2 ...
    instead of A1, A2, B1, B2, C1, C2 ...).

    When multiple sections are specified in the agenda, all sections for
    the first global spec run first, followed by all sections for the
    second spec, etc.: given sections X, Y and global specs A, B with two
    iterations, the order is
    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
    """
    by_workload = [list(group) for _, group in
                   groupby(specs, lambda s: s.workload_id)]
    per_spec = [[(spec, n + 1) for n in range(spec.iterations)]
                for spec in chain(*by_workload)]
    # zip_longest pads the shorter spec lists with None; drop the padding.
    for pair in chain(*map(list, zip_longest(*per_spec))):
        if pair is not None:
            yield pair
def permute_by_section(specs):
    """
    Interleave iterations: the first iteration of every benchmark runs
    before any second iteration (A1, B1, C1, A2, B2, C2 ... instead of
    A1, A2, B1, B2, C1, C2 ...).

    When multiple sections are specified in the agenda, all specs for the
    first section run before all specs for the second section, etc.: given
    sections X, Y and global specs A, B with two iterations, the order is
    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
    """
    by_section = [list(group) for _, group in
                  groupby(specs, lambda s: s.section_id)]
    per_spec = [[(spec, n + 1) for n in range(spec.iterations)]
                for spec in chain(*by_section)]
    # zip_longest pads the shorter spec lists with None; drop the padding.
    for pair in chain(*map(list, zip_longest(*per_spec))):
        if pair is not None:
            yield pair
def permute_randomly(specs):
    """
    Yield all spec/iteration tuples in a randomly shuffled order.
    """
    pool = [(spec, n)
            for spec in specs
            for n in range(1, spec.iterations + 1)]
    random.shuffle(pool)
    for pair in pool:
        yield pair
# Maps the run-config `execution_order` setting to its generator function.
permute_map = {
    'by_iteration': permute_by_iteration,
    'by_workload': permute_by_workload,
    'by_section': permute_by_section,
    'random': permute_randomly,
}
def permute_iterations(specs, exec_order):
    """Dispatch to the permutation generator selected by ``exec_order``."""
    try:
        permute_func = permute_map[exec_order]
    except KeyError:
        msg = 'Unknown execution order "{}"; must be in: {}'
        raise ValueError(msg.format(exec_order, list(permute_map.keys())))
    return permute_func(specs)
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for streaming results."""
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Value
from google.cloud.proto.spanner.v1 import type_pb2
import six
# pylint: disable=ungrouped-imports
from google.cloud.spanner._helpers import _parse_value_pb
# pylint: enable=ungrouped-imports
class StreamedResultSet(object):
    """Process a sequence of partial result sets into a single set of row data.
    :type response_iterator:
    :param response_iterator:
        Iterator yielding
        :class:`google.cloud.proto.spanner.v1.result_set_pb2.PartialResultSet`
        instances.
    :type source: :class:`~google.cloud.spanner.snapshot.Snapshot`
    :param source: Snapshot from which the result set was fetched.
    """
    def __init__(self, response_iterator, source=None):
        self._response_iterator = response_iterator
        self._rows = [] # Fully-processed rows
        self._counter = 0 # Counter for processed responses
        self._metadata = None # Until set from first PRS
        self._stats = None # Until set from last PRS
        self._resume_token = None # To resume from last received PRS
        self._current_row = [] # Accumulated values for incomplete row
        self._pending_chunk = None # Incomplete value
        self._source = source # Source snapshot
    @property
    def rows(self):
        """Fully-processed rows.
        :rtype: list of row-data lists.
        :returns: list of completed row data, from processed PRS responses.
        """
        return self._rows
    @property
    def fields(self):
        """Field descriptors for result set columns.
        :rtype: list of :class:`~google.cloud.proto.spanner.v1.type_pb2.Field`
        :returns: list of fields describing column names / types.
        """
        return self._metadata.row_type.fields
    @property
    def metadata(self):
        """Result set metadata
        :rtype: :class:`~.result_set_pb2.ResultSetMetadata`
        :returns: structure describing the results
        """
        return self._metadata
    @property
    def stats(self):
        """Result set statistics
        :rtype:
           :class:`~google.cloud.proto.spanner.v1.result_set_pb2.ResultSetStats`
        :returns: structure describing status about the response
        """
        return self._stats
    @property
    def resume_token(self):
        """Token for resuming interrupted read / query.
        :rtype: bytes
        :returns: token from last chunk of results.
        """
        return self._resume_token
    def _merge_chunk(self, value):
        """Merge pending chunk with next value.
        :type value: :class:`~google.protobuf.struct_pb2.Value`
        :param value: continuation of chunked value from previous
                       partial result set.
        :rtype: :class:`~google.protobuf.struct_pb2.Value`
        :returns: the merged value
        """
        # The pending chunk belongs to the column we are about to fill.
        current_column = len(self._current_row)
        field = self.fields[current_column]
        merged = _merge_by_type(self._pending_chunk, value, field.type)
        self._pending_chunk = None
        return merged
    def _merge_values(self, values):
        """Merge values into rows.
        :type values: list of :class:`~google.protobuf.struct_pb2.Value`
        :param values: non-chunked values from partial result set.
        """
        width = len(self.fields)
        for value in values:
            index = len(self._current_row)
            field = self.fields[index]
            self._current_row.append(_parse_value_pb(value, field.type))
            # A full row's worth of values completes the row.
            if len(self._current_row) == width:
                self._rows.append(self._current_row)
                self._current_row = []
    def consume_next(self):
        """Consume the next partial result set from the stream.
        Parse the result set into new/existing rows in :attr:`_rows`
        """
        response = six.next(self._response_iterator)
        self._counter += 1
        self._resume_token = response.resume_token
        if self._metadata is None: # first response
            metadata = self._metadata = response.metadata
            # Propagate the transaction id back to the source snapshot if
            # it does not have one yet.
            source = self._source
            if source is not None and source._transaction_id is None:
                source._transaction_id = metadata.transaction.id
        if response.HasField('stats'): # last response
            self._stats = response.stats
        values = list(response.values)
        if self._pending_chunk is not None:
            # Complete the value that was split across the previous response.
            values[0] = self._merge_chunk(values[0])
        if response.chunked_value:
            # The last value is incomplete; hold it back for the next response.
            self._pending_chunk = values.pop()
        self._merge_values(values)
    def consume_all(self):
        """Consume the streamed responses until there are no more."""
        while True:
            try:
                self.consume_next()
            except StopIteration:
                break
    def __iter__(self):
        # Hand off already-buffered rows first, then keep pulling responses.
        # NOTE(review): relies on StopIteration escaping a generator to end
        # iteration; under PEP 479 (Python 3.7+) that becomes RuntimeError —
        # confirm the supported Python versions.
        iter_rows, self._rows[:] = self._rows[:], ()
        while True:
            if not iter_rows:
                self.consume_next() # raises StopIteration
                iter_rows, self._rows[:] = self._rows[:], ()
            while iter_rows:
                yield iter_rows.pop(0)
class Unmergeable(ValueError):
    """Unable to merge two values.
    :type lhs: :class:`google.protobuf.struct_pb2.Value`
    :param lhs: pending value to be merged
    :type rhs: :class:`google.protobuf.struct_pb2.Value`
    :param rhs: remaining value to be merged
    :type type_: :class:`google.cloud.proto.spanner.v1.type_pb2.Type`
    :param type_: field type of values being merged
    """
    def __init__(self, lhs, rhs, type_):
        type_name = type_pb2.TypeCode.Name(type_.code)
        message = "Cannot merge %s values: %s %s" % (type_name, lhs, rhs)
        super(Unmergeable, self).__init__(message)
def _unmergeable(lhs, rhs, type_):
    """Helper for '_merge_by_type': always raises :class:`Unmergeable`."""
    raise Unmergeable(lhs, rhs, type_)
def _merge_float64(lhs, rhs, type_):  # pylint: disable=unused-argument
    """Helper for '_merge_by_type'.

    A FLOAT64 split across chunks arrives either as two string pieces
    (concatenate them) or as a complete number followed by an empty
    string marking an array continuation (keep the number).
    """
    if lhs.WhichOneof('kind') == 'string_value':
        # Both halves are string-encoded: rejoin the split literal.
        return Value(string_value=lhs.string_value + rhs.string_value)
    if (lhs.WhichOneof('kind') == 'number_value' and
            rhs.WhichOneof('kind') == 'string_value' and
            rhs.string_value == ''):
        # Complete number + empty-string continuation marker.
        return lhs
    raise Unmergeable(lhs, rhs, type_)
def _merge_string(lhs, rhs, type_):  # pylint: disable=unused-argument
    """Helper for '_merge_by_type'.

    STRING, INT64 and BYTES values (see ``_MERGE_BY_TYPE``) are all
    wire-encoded as strings, so a split value is recovered by plain
    concatenation.
    """
    return Value(string_value=lhs.string_value + rhs.string_value)
# Scalar type codes whose individual values can never be split across
# chunks; array/struct merging just keeps such elements whole.
_UNMERGEABLE_TYPES = (type_pb2.BOOL,)
def _merge_array(lhs, rhs, type_):
    """Helper for '_merge_by_type'.

    Merge two chunked ARRAY values: the last element of ``lhs`` may be
    the first half of the first element of ``rhs``.
    """
    element_type = type_.array_element_type
    if element_type.code in _UNMERGEABLE_TYPES:
        # Individual values cannot be merged, just concatenate
        lhs.list_value.values.extend(rhs.list_value.values)
        return lhs
    lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)
    first = rhs.pop(0)
    if first.HasField('null_value'):  # can't merge
        lhs.append(first)
    else:
        last = lhs.pop()
        try:
            merged = _merge_by_type(last, first, element_type)
        except Unmergeable:
            # The chunk boundary fell exactly between two complete
            # elements: keep both unchanged.
            lhs.append(last)
            lhs.append(first)
        else:
            lhs.append(merged)
    return Value(list_value=ListValue(values=(lhs + rhs)))
def _merge_struct(lhs, rhs, type_):
    """Helper for '_merge_by_type'.

    Merge two chunked STRUCT values; the chunk boundary fell on the
    field at index ``len(lhs) - 1`` of the struct's field list.
    """
    fields = type_.struct_type.fields
    lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)
    # Field the boundary landed on; its type decides mergeability.
    candidate_type = fields[len(lhs) - 1].type
    first = rhs.pop(0)
    if (first.HasField('null_value') or
            candidate_type.code in _UNMERGEABLE_TYPES):
        lhs.append(first)
    else:
        last = lhs.pop()
        lhs.append(_merge_by_type(last, first, candidate_type))
    return Value(list_value=ListValue(values=lhs + rhs))
# Dispatch table mapping a Spanner type code to its chunk-merge strategy.
# INT64 and BYTES share the STRING strategy because all three are
# wire-encoded as strings.
_MERGE_BY_TYPE = {
    type_pb2.BOOL: _unmergeable,
    type_pb2.INT64: _merge_string,
    type_pb2.FLOAT64: _merge_float64,
    type_pb2.STRING: _merge_string,
    type_pb2.ARRAY: _merge_array,
    type_pb2.STRUCT: _merge_struct,
    type_pb2.BYTES: _merge_string,
}
def _merge_by_type(lhs, rhs, type_):
    """Helper for '_merge_chunk'.

    Dispatch on ``type_.code``; raises ``KeyError`` for a type code with
    no registered merge strategy.
    """
    merger = _MERGE_BY_TYPE[type_.code]
    return merger(lhs, rhs, type_)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import sys
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from quantum.openstack.common import cfg
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common.rpc import amqp as rpc_amqp
from quantum.openstack.common.rpc import common as rpc_common
# Configuration options for the kombu/RabbitMQ RPC driver; registered on
# cfg.CONF immediately below.
kombu_opts = [
    cfg.StrOpt('kombu_ssl_version',
               default='',
               help='SSL version to use (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_keyfile',
               default='',
               help='SSL key file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_certfile',
               default='',
               help='SSL cert file (valid only if SSL enabled)'),
    cfg.StrOpt('kombu_ssl_ca_certs',
               default='',
               help=('SSL certification authority file '
                     '(valid only if SSL enabled)')),
    cfg.StrOpt('rabbit_host',
               default='localhost',
               help='the RabbitMQ host'),
    cfg.IntOpt('rabbit_port',
               default=5672,
               help='the RabbitMQ port'),
    cfg.BoolOpt('rabbit_use_ssl',
                default=False,
                help='connect over SSL for RabbitMQ'),
    cfg.StrOpt('rabbit_userid',
               default='guest',
               help='the RabbitMQ userid'),
    cfg.StrOpt('rabbit_password',
               default='guest',
               help='the RabbitMQ password'),
    cfg.StrOpt('rabbit_virtual_host',
               default='/',
               help='the RabbitMQ virtual host'),
    cfg.IntOpt('rabbit_retry_interval',
               default=1,
               help='how frequently to retry connecting with RabbitMQ'),
    cfg.IntOpt('rabbit_retry_backoff',
               default=2,
               help='how long to backoff for between retries when connecting '
                    'to RabbitMQ'),
    cfg.IntOpt('rabbit_max_retries',
               default=0,
               help='maximum retries with trying to connect to RabbitMQ '
                    '(the default of 0 implies an infinite retry count)'),
    cfg.BoolOpt('rabbit_durable_queues',
                default=False,
                help='use durable queues in RabbitMQ'),
]
# Register the driver options on the global config object at import time.
cfg.CONF.register_opts(kombu_opts)
# Reuse the shared RPC logger so all drivers log under one name.
LOG = rpc_common.LOG
class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, channel, callback, tag, **kwargs):
        """Declare a queue on an amqp channel.

        'channel' is the amqp channel to use
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        queue name, exchange name, and other kombu options are
        passed in here as a dictionary.
        """
        self.callback = callback
        self.tag = str(tag)
        self.kwargs = kwargs
        self.queue = None
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-declare the queue after a rabbit reconnect"""
        self.channel = channel
        self.kwargs['channel'] = channel
        self.queue = kombu.entity.Queue(**self.kwargs)
        self.queue.declare()

    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel.  This will
        start the flow of messages from the queue.  Using the
        Connection.iterconsume() iterator will process the messages,
        calling the appropriate callback.

        If a callback is specified in kwargs, use that.  Otherwise,
        use the callback passed during __init__()

        If kwargs['nowait'] is True, then this call will block until
        a message is read.

        Messages will automatically be acked if the callback doesn't
        raise an exception
        """
        options = {'consumer_tag': self.tag}
        options['nowait'] = kwargs.get('nowait', False)
        callback = kwargs.get('callback', self.callback)
        if not callback:
            raise ValueError("No callback defined")

        def _callback(raw_message):
            message = self.channel.message_to_python(raw_message)
            try:
                callback(message.payload)
                # Only ack after the callback succeeds, so failed
                # messages stay on the queue.
                message.ack()
            except Exception:
                LOG.exception(_("Failed to process message... skipping it."))

        self.queue.consume(*args, callback=_callback, **options)

    def cancel(self):
        """Cancel the consuming from the queue, if it has started"""
        try:
            self.queue.cancel(self.tag)
        # Bug fix: `except KeyError, e:` is Python-2-only syntax; the
        # `as` form below is valid on Python 2.6+ and Python 3.
        except KeyError as e:
            # NOTE(comstud): Kludge to get around a amqplib bug
            if str(e) != "u'%s'" % self.tag:
                raise
        self.queue = None
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'"""

    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
        """Init a 'direct' queue.

        'channel' is the amqp channel to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        # Defaults for a per-call reply queue; caller kwargs override.
        options = dict(durable=False, auto_delete=True, exclusive=True)
        options.update(kwargs)
        exchange = kombu.entity.Exchange(
            name=msg_id,
            type='direct',
            durable=options['durable'],
            auto_delete=options['auto_delete'])
        super(DirectConsumer, self).__init__(
            channel, callback, tag,
            name=msg_id,
            exchange=exchange,
            routing_key=msg_id,
            **options)
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'"""

    def __init__(self, conf, channel, topic, callback, tag, name=None,
                 **kwargs):
        """Init a 'topic' queue.

        :param channel: the amqp channel to use
        :param topic: the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param tag: a unique ID for the consumer on the channel
        :param name: optional queue name, defaults to topic
        :paramtype name: str

        Other kombu options may be passed as keyword arguments
        """
        # Shared, non-exclusive queue; durability follows configuration.
        options = dict(durable=conf.rabbit_durable_queues,
                       auto_delete=False,
                       exclusive=False)
        options.update(kwargs)
        exchange = kombu.entity.Exchange(
            name=rpc_amqp.get_control_exchange(conf),
            type='topic',
            durable=options['durable'],
            auto_delete=options['auto_delete'])
        super(TopicConsumer, self).__init__(
            channel, callback, tag,
            name=name or topic,
            exchange=exchange,
            routing_key=topic,
            **options)
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'"""

    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
        """Init a 'fanout' queue.

        'channel' is the amqp channel to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        Other kombu options may be passed
        """
        exchange_name = '%s_fanout' % topic
        # Every consumer gets its own uniquely-named queue on the fanout
        # exchange so each one receives every message.
        queue_name = '%s_fanout_%s' % (topic, uuid.uuid4().hex)
        options = dict(durable=False, auto_delete=True, exclusive=True)
        options.update(kwargs)
        exchange = kombu.entity.Exchange(
            name=exchange_name,
            type='fanout',
            durable=options['durable'],
            auto_delete=options['auto_delete'])
        super(FanoutConsumer, self).__init__(
            channel, callback, tag,
            name=queue_name,
            exchange=exchange,
            routing_key=topic,
            **options)
class Publisher(object):
    """Base Publisher class"""

    def __init__(self, channel, exchange_name, routing_key, **kwargs):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.exchange_name = exchange_name
        self.routing_key = routing_key
        self.kwargs = kwargs
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-establish the Producer after a rabbit reconnection"""
        # Recreate both the exchange and the producer on the new channel.
        self.exchange = kombu.entity.Exchange(
            name=self.exchange_name, **self.kwargs)
        self.producer = kombu.messaging.Producer(
            exchange=self.exchange,
            channel=channel,
            routing_key=self.routing_key)

    def send(self, msg):
        """Send a message"""
        self.producer.publish(msg)
class DirectPublisher(Publisher):
    """Publisher class for 'direct'"""

    def __init__(self, conf, channel, msg_id, **kwargs):
        """init a 'direct' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = dict(durable=False, auto_delete=True, exclusive=True)
        options.update(kwargs)
        # msg_id doubles as both exchange name and routing key.
        super(DirectPublisher, self).__init__(
            channel, msg_id, msg_id, type='direct', **options)
class TopicPublisher(Publisher):
    """Publisher class for 'topic'"""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'topic' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = dict(durable=conf.rabbit_durable_queues,
                       auto_delete=False,
                       exclusive=False)
        options.update(kwargs)
        super(TopicPublisher, self).__init__(
            channel,
            rpc_amqp.get_control_exchange(conf),
            topic,
            type='topic',
            **options)
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'"""

    def __init__(self, conf, channel, topic, **kwargs):
        """init a 'fanout' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        options = dict(durable=False, auto_delete=True, exclusive=True)
        options.update(kwargs)
        # Fanout exchanges ignore the routing key, hence None.
        super(FanoutPublisher, self).__init__(
            channel, '%s_fanout' % topic, None, type='fanout', **options)
class NotifyPublisher(TopicPublisher):
    """Publisher class for 'notify'"""

    def __init__(self, conf, channel, topic, **kwargs):
        # Allow callers to override durability; default to the config.
        self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)

    def reconnect(self, channel):
        super(NotifyPublisher, self).reconnect(channel)
        # NOTE(jerdfelt): Normally the consumer would create the queue, but
        # we do this to ensure that messages don't get dropped if the
        # consumer is started after we do
        queue = kombu.entity.Queue(
            channel=channel,
            exchange=self.exchange,
            durable=self.durable,
            name=self.routing_key,
            routing_key=self.routing_key)
        queue.declare()
class Connection(object):
    """Connection object.

    Wraps a kombu BrokerConnection, handling (re)connection with
    exponential backoff and re-declaration of consumers after a
    reconnect.
    """

    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}

        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        params = {}
        # Bug fix: .iteritems() is Python-2-only; .items() behaves the
        # same here on both Python 2 and 3.
        for sp_key, value in server_params.items():
            p_key = server_params_to_kombu_params.get(sp_key, sp_key)
            params[p_key] = value

        params.setdefault('hostname', self.conf.rabbit_host)
        params.setdefault('port', self.conf.rabbit_port)
        params.setdefault('userid', self.conf.rabbit_userid)
        params.setdefault('password', self.conf.rabbit_password)
        params.setdefault('virtual_host', self.conf.rabbit_virtual_host)

        self.params = params

        if self.conf.fake_rabbit:
            self.params['transport'] = 'memory'
            self.memory_transport = True
        else:
            self.memory_transport = False

        if self.conf.rabbit_use_ssl:
            self.params['ssl'] = self._fetch_ssl_params()

        # Bug fix: reconnect()'s except clause reads
        # self.connection_errors, which _connect() only sets after the
        # BrokerConnection is built.  Pre-seed it so a failure on the
        # very first attempt raises the real error, not AttributeError.
        self.connection_errors = ()
        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params should be used for the
        connection (if any).
        """
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = self.conf.kombu_ssl_version
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        if not ssl_params:
            # Just have the default behavior
            return True
        else:
            # Return the extended behavior
            return ssl_params

    def _connect(self):
        """Connect to rabbit.  Re-establish any queues that may have
        been declared before if we are reconnecting.  Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_("Reconnecting to AMQP server on "
                       "%(hostname)s:%(port)d") % self.params)
            try:
                self.connection.close()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**self.params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
                 self.params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            attempt += 1
            try:
                self._connect()
                return
            # Bug fix: `except X, e` is Python-2-only syntax.  The `as`
            # form works on 2.6+/3.x; capture str(e) inside the handler
            # because Python 3 unbinds `e` when the except block exits.
            except (self.connection_errors, IOError) as e:
                err_str = str(e)
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                err_str = str(e)

            log_info = {}
            log_info['err_str'] = err_str
            log_info['max_retries'] = self.max_retries
            log_info.update(self.params)

            if self.max_retries and attempt == self.max_retries:
                LOG.error(_('Unable to connect to AMQP server on '
                            '%(hostname)s:%(port)d after %(max_retries)d '
                            'tries: %(err_str)s') % log_info)
                # NOTE(comstud): Copied from original code.  There's
                # really no better recourse because if this was a queue we
                # need to consume on, we have no way to consume anymore.
                sys.exit(1)

            # First attempt starts at interval_start; later attempts add
            # interval_stepping each time, capped at interval_max.
            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)

    def ensure(self, error_callback, method, *args, **kwargs):
        """Run method(), reconnecting and retrying on transport errors."""
        while True:
            try:
                return method(*args, **kwargs)
            # Bug fix: Python-2-only `except X, e` -> `except X as e`.
            except (self.connection_errors, socket.timeout, IOError) as e:
                if error_callback:
                    error_callback(e)
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise
                if error_callback:
                    error_callback(e)
            self.reconnect()

    def get_channel(self):
        """Convenience call for bin/clear_rabbit_queues"""
        return self.channel

    def close(self):
        """Close/release this connection"""
        self.cancel_consumer_thread()
        self.connection.release()
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again"""
        self.cancel_consumer_thread()
        self.channel.close()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        self.consumers = []

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                        "%(err_str)s") % log_info)

        def _declare_consumer():
            # Bug fix: iterator.next() is Python-2-only; the next()
            # builtin works on 2.6+ and 3.x.
            consumer = consumer_cls(self.conf, self.channel, topic, callback,
                                    next(self.consumer_num))
            self.consumers.append(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers"""

        info = {'do_consume': True}

        def _error_callback(exc):
            if isinstance(exc, socket.timeout):
                LOG.exception(_('Timed out waiting for RPC response: %s') %
                              str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))
                info['do_consume'] = True

        def _consume():
            if info['do_consume']:
                queues_head = self.consumers[:-1]
                queues_tail = self.consumers[-1]
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                info['do_consume'] = False
            return self.connection.drain_events(timeout=timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                # Bug fix (PEP 479): `raise StopIteration` inside a
                # generator becomes RuntimeError on Python 3.7+; a bare
                # return ends the generator identically on 2.x and 3.x.
                return
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread"""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def publisher_send(self, cls, topic, msg, **kwargs):
        """Send to a publisher based on the publisher class"""

        def _error_callback(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s") % log_info)

        def _publish():
            publisher = cls(self.conf, self.channel, topic, **kwargs)
            publisher.send(msg)

        self.ensure(_error_callback, _publish)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer"""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message"""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg):
        """Send a 'topic' message"""
        self.publisher_send(TopicPublisher, topic, msg)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message"""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic"""
        self.publisher_send(NotifyPublisher, topic, msg, **kwargs)

    def consume(self, limit=None):
        """Consume from all queues/consumers"""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                # Bug fix: it.next() is Python-2-only; next(it) is
                # portable.
                next(it)
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread"""

        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return

        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object"""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        if fanout:
            self.declare_fanout_consumer(topic, proxy_cb)
        else:
            self.declare_topic_consumer(topic, proxy_cb)

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object"""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.declare_topic_consumer(topic, proxy_cb, pool_name)
def create_connection(conf, new=True):
    """Create a connection, drawn from the shared Connection pool."""
    return rpc_amqp.create_connection(
        conf, new,
        rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    return rpc_amqp.multicall(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    return rpc_amqp.call(
        conf, context, topic, msg, timeout,
        rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    return rpc_amqp.cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    return rpc_amqp.fanout_cast(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    return rpc_amqp.cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    # Bug fix: this previously delegated to rpc_amqp.cast_to_server()
    # (a plain topic cast), so fanout casts targeted at a specific
    # server never went over the fanout exchange.
    return rpc_amqp.fanout_cast_to_server(
        conf, context, server_params, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg):
    """Sends a notification event on a topic."""
    return rpc_amqp.notify(
        conf, context, topic, msg,
        rpc_amqp.get_connection_pool(conf, Connection))
def cleanup():
    """Release all pooled connections held by this driver."""
    return rpc_amqp.cleanup(Connection.pool)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
from google.cloud.automl_v1beta1 import AutoMlClient, PredictionServiceClient
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id
# Shared fixtures for the CloudAutoMLHook tests below.
CREDENTIALS = "test-creds"
CLIENT_INFO = "client-info"
TASK_ID = "test-automl-hook"
GCP_PROJECT_ID = "test-project"
GCP_LOCATION = "test-location"
MODEL_NAME = "test_model"
MODEL_ID = "projects/198907790164/locations/us-central1/models/TBL9195602771183665152"
DATASET_ID = "TBL123456789"
MODEL = {
    "display_name": MODEL_NAME,
    "dataset_id": DATASET_ID,
    "tables_model_metadata": {"train_budget_milli_node_hours": 1000},
}
# Canonical resource paths built with the real client helpers so the
# assertions below match what the hook is expected to construct.
LOCATION_PATH = AutoMlClient.location_path(GCP_PROJECT_ID, GCP_LOCATION)
MODEL_PATH = PredictionServiceClient.model_path(GCP_PROJECT_ID, GCP_LOCATION, MODEL_ID)
DATASET_PATH = AutoMlClient.dataset_path(GCP_PROJECT_ID, GCP_LOCATION, DATASET_ID)
INPUT_CONFIG = {"input": "value"}
OUTPUT_CONFIG = {"output": "value"}
PAYLOAD = {"test": "payload"}
DATASET = {"dataset_id": "data"}
MASK = {"field": "mask"}
class TestAuoMLHook(unittest.TestCase):
    """Unit tests for :class:`CloudAutoMLHook`.

    Each test patches the underlying ``AutoMlClient`` /
    ``PredictionServiceClient`` method and asserts that the hook forwards
    the expected, fully-resolved arguments.

    NOTE(review): the class name has a typo ("AuoML" vs "AutoML"); it is
    kept unchanged here because it is the class's public name.
    """
    def setUp(self) -> None:
        # Patch the base hook __init__ so no real GCP connection/config
        # is needed, then stub out credential resolution.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.automl.CloudBaseHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.hook = CloudAutoMLHook()
            self.hook._get_credentials = mock.MagicMock(  # type: ignore
                return_value=CREDENTIALS
            )
    @mock.patch(
        "airflow.providers.google.cloud.hooks.automl.CloudBaseHook.client_info",
        new_callable=lambda: CLIENT_INFO,
    )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient")
    def test_get_conn(self, mock_automl_client, mock_client_info):
        self.hook.get_conn()
        mock_automl_client.assert_called_once_with(
            credentials=CREDENTIALS, client_info=CLIENT_INFO
        )
    @mock.patch(
        "airflow.providers.google.cloud.hooks.automl.CloudBaseHook.client_info",
        new_callable=lambda: CLIENT_INFO,
    )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient")
    def test_prediction_client(self, mock_prediction_client, mock_client_info):
        client = self.hook.prediction_client  # pylint: disable=unused-variable # noqa
        mock_prediction_client.assert_called_once_with(
            credentials=CREDENTIALS, client_info=CLIENT_INFO
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_model")
    def test_create_model(self, mock_create_model):
        self.hook.create_model(
            model=MODEL, location=GCP_LOCATION, project_id=GCP_PROJECT_ID
        )
        mock_create_model.assert_called_once_with(
            parent=LOCATION_PATH, model=MODEL, retry=None, timeout=None, metadata=None
        )
    @mock.patch(
        "airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.batch_predict"
    )
    def test_batch_predict(self, mock_batch_predict):
        self.hook.batch_predict(
            model_id=MODEL_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            input_config=INPUT_CONFIG,
            output_config=OUTPUT_CONFIG,
        )
        mock_batch_predict.assert_called_once_with(
            name=MODEL_PATH,
            input_config=INPUT_CONFIG,
            output_config=OUTPUT_CONFIG,
            params=None,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.predict")
    def test_predict(self, mock_predict):
        self.hook.predict(
            model_id=MODEL_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            payload=PAYLOAD,
        )
        mock_predict.assert_called_once_with(
            name=MODEL_PATH,
            payload=PAYLOAD,
            params=None,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_dataset")
    def test_create_dataset(self, mock_create_dataset):
        self.hook.create_dataset(
            dataset=DATASET, location=GCP_LOCATION, project_id=GCP_PROJECT_ID
        )
        mock_create_dataset.assert_called_once_with(
            parent=LOCATION_PATH,
            dataset=DATASET,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.import_data")
    def test_import_dataset(self, mock_import_data):
        self.hook.import_data(
            dataset_id=DATASET_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            input_config=INPUT_CONFIG,
        )
        mock_import_data.assert_called_once_with(
            name=DATASET_PATH,
            input_config=INPUT_CONFIG,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_column_specs")
    def test_list_column_specs(self, mock_list_column_specs):
        table_spec = "table_spec_id"
        filter_ = "filter"
        page_size = 42
        self.hook.list_column_specs(
            dataset_id=DATASET_ID,
            table_spec_id=table_spec,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            field_mask=MASK,
            filter_=filter_,
            page_size=page_size,
        )
        # The expected parent is the table-spec path, not the dataset.
        parent = AutoMlClient.table_spec_path(
            GCP_PROJECT_ID, GCP_LOCATION, DATASET_ID, table_spec
        )
        mock_list_column_specs.assert_called_once_with(
            parent=parent,
            field_mask=MASK,
            filter_=filter_,
            page_size=page_size,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.get_model")
    def test_get_model(self, mock_get_model):
        self.hook.get_model(
            model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID
        )
        mock_get_model.assert_called_once_with(
            name=MODEL_PATH, retry=None, timeout=None, metadata=None
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_model")
    def test_delete_model(self, mock_delete_model):
        self.hook.delete_model(
            model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID
        )
        mock_delete_model.assert_called_once_with(
            name=MODEL_PATH, retry=None, timeout=None, metadata=None
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.update_dataset")
    def test_update_dataset(self, mock_update_dataset):
        self.hook.update_dataset(
            dataset=DATASET, update_mask=MASK, project_id=GCP_PROJECT_ID
        )
        mock_update_dataset.assert_called_once_with(
            dataset=DATASET, update_mask=MASK, retry=None, timeout=None, metadata=None
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.deploy_model")
    def test_deploy_model(self, mock_deploy_model):
        image_detection_metadata = {}
        self.hook.deploy_model(
            model_id=MODEL_ID,
            image_detection_metadata=image_detection_metadata,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
        )
        mock_deploy_model.assert_called_once_with(
            name=MODEL_PATH,
            retry=None,
            timeout=None,
            metadata=None,
            image_object_detection_model_deployment_metadata=image_detection_metadata,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_table_specs")
    def test_list_table_specs(self, mock_list_table_specs):
        filter_ = "filter"
        page_size = 42
        self.hook.list_table_specs(
            dataset_id=DATASET_ID,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT_ID,
            filter_=filter_,
            page_size=page_size,
        )
        mock_list_table_specs.assert_called_once_with(
            parent=DATASET_PATH,
            filter_=filter_,
            page_size=page_size,
            retry=None,
            timeout=None,
            metadata=None,
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_datasets")
    def test_list_datasets(self, mock_list_datasets):
        self.hook.list_datasets(location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
        mock_list_datasets.assert_called_once_with(
            parent=LOCATION_PATH, retry=None, timeout=None, metadata=None
        )
    @mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_dataset")
    def test_delete_dataset(self, mock_delete_dataset):
        self.hook.delete_dataset(
            dataset_id=DATASET_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID
        )
        mock_delete_dataset.assert_called_once_with(
            name=DATASET_PATH, retry=None, timeout=None, metadata=None
        )
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LogProfilesOperations(object):
    """LogProfilesOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2016-03-01".
    """
    # NOTE(review): AutoRest-generated client code (see file header).  Edits
    # here are lost on regeneration; lasting changes belong in the swagger
    # definition or the code generator, not in this file.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed service API version sent as the 'api-version' query parameter
        # on every request issued by this operation group.
        self.api_version = "2016-03-01"
        self.config = config
    def delete(
            self, log_profile_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the log profile.
        :param log_profile_name: The name of the log profile.
        :type log_profile_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'logProfileName': self._serialize.url("log_profile_name", log_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request client id header for server-side tracing/correlation.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200 is the only status treated as success; anything else surfaces
        # as a CloudError carrying the service's request id.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}'}
    def get(
            self, log_profile_name, custom_headers=None, raw=False, **operation_config):
        """Gets the log profile.
        :param log_profile_name: The name of the log profile.
        :type log_profile_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: LogProfileResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.monitor.models.LogProfileResource or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'logProfileName': self._serialize.url("log_profile_name", log_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # Unlike delete/create_or_update, errors here are modeled as a typed
        # ErrorResponseException rather than a generic CloudError.
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LogProfileResource', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}'}
    def create_or_update(
            self, log_profile_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Create or update a log profile in Azure Monitoring REST API.
        :param log_profile_name: The name of the log profile.
        :type log_profile_name: str
        :param parameters: Parameters supplied to the operation.
        :type parameters: ~azure.mgmt.monitor.models.LogProfileResource
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: LogProfileResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.monitor.models.LogProfileResource or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'logProfileName': self._serialize.url("log_profile_name", log_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        # Full resource payload is PUT; partial updates go through update().
        body_content = self._serialize.body(parameters, 'LogProfileResource')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LogProfileResource', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}'}
    def update(
            self, log_profile_name, log_profiles_resource, custom_headers=None, raw=False, **operation_config):
        """Updates an existing LogProfilesResource. To update other fields use the
        CreateOrUpdate method.
        :param log_profile_name: The name of the log profile.
        :type log_profile_name: str
        :param log_profiles_resource: Parameters supplied to the operation.
        :type log_profiles_resource:
         ~azure.mgmt.monitor.models.LogProfileResourcePatch
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: LogProfileResource or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.monitor.models.LogProfileResource or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.update.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'logProfileName': self._serialize.url("log_profile_name", log_profile_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        # Patch payload (LogProfileResourcePatch) -> HTTP PATCH semantics.
        body_content = self._serialize.body(log_profiles_resource, 'LogProfileResourcePatch')
        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LogProfileResource', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}'}
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """List the log profiles.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of LogProfileResource
        :rtype:
         ~azure.mgmt.monitor.models.LogProfileResourcePaged[~azure.mgmt.monitor.models.LogProfileResource]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback invoked lazily by LogProfileResourcePaged: builds
        # the first-page URL when next_link is None, otherwise follows the
        # server-provided continuation link verbatim.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.LogProfileResourcePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.LogProfileResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles'}
|
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.basic.basic_flow_match\
import BasicFlowMatch
def _match_field(key):
    """Build a property proxying optional OFP match field *key* in ``self._body``.

    The getter returns None when the field is absent; the setter stores the
    value under *key*.  This replaces ~55 hand-written, identical
    getter/setter pairs that the class previously carried.
    """
    def _get(self):
        return self._body.get(key)

    def _set(self, val):
        self._body[key] = val

    return property(_get, _set)


class OFPFlowMatch(BasicFlowMatch):
    """OpenFlow-style flow match whose optional fields live in ``self._body``."""

    # property key
    IN_PHY_PORT = "in_phy_port"
    METADATA = "metadata"
    METADATA_MASK = "metadata_mask"
    ETH_SRC = "eth_src"
    ETH_SRC_MASK = "eth_src_mask"
    ETH_DST = "eth_dst"
    ETH_DST_MASK = "eth_dst_mask"
    VLAN_VID = "vlan_vid"
    VLAN_VID_MASK = "vlan_vid_mask"
    VLAN_PCP = "vlan_pcp"
    ETH_TYPE = "eth_type"
    IP_DSCP = "ip_dscp"
    IP_ECN = "ip_ecn"
    IP_PROTO = "ip_proto"
    IPV4_SRC = "ipv4_src"
    IPV4_SRC_MASK = "ipv4_src_mask"
    IPV4_DST = "ipv4_dst"
    IPV4_DST_MASK = "ipv4_dst_mask"
    TCP_SRC = "tcp_src"
    TCP_DST = "tcp_dst"
    UDP_SRC = "udp_src"
    UDP_DST = "udp_dst"
    SCTP_SRC = "sctp_src"
    SCTP_DST = "sctp_dst"
    ICMPV4_TYPE = "icmpv4_type"
    ICMPV4_CODE = "icmpv4_code"
    ARP_OP = "arp_op"
    ARP_SPA = "arp_spa"
    ARP_SPA_MASK = "arp_spa_mask"
    ARP_TPA = "arp_tpa"
    ARP_TPA_MASK = "arp_tpa_mask"
    ARP_SHA = "arp_sha"
    ARP_SHA_MASK = "arp_sha_mask"
    ARP_THA = "arp_tha"
    ARP_THA_MASK = "arp_tha_mask"
    IPV6_SRC = "ipv6_src"
    IPV6_SRC_MASK = "ipv6_src_mask"
    IPV6_DST = "ipv6_dst"
    IPV6_DST_MASK = "ipv6_dst_mask"
    IPV6_FLABEL = "ipv6_flabel"
    IPV6_FLABEL_MASK = "ipv6_flabel_mask"
    ICMPV6_TYPE = "icmpv6_type"
    ICMPV6_CODE = "icmpv6_code"
    IPV6_ND_TARGET = "ipv6_nd_target"
    IPV6_ND_SLL = "ipv6_nd_sll"
    IPV6_ND_TLL = "ipv6_nd_tll"
    MPLS_LABEL = "mpls_label"
    MPLS_TC = "mpls_tc"
    MPLS_BOS = "mpls_bos"
    PBB_ISID = "pbb_isid"
    PBB_ISID_MASK = "pbb_isid_mask"
    TUNNEL_ID = "tunnel_id"
    TUNNEL_ID_MASK = "tunnel_id_mask"
    IPV6_EXTHDR = "ipv6_exthdr"
    IPV6_EXTHDR_MASK = "ipv6_exthdr_mask"

    # All optional match-field keys, in the original declaration order.
    # Each key string doubles as the name of the corresponding property
    # (installed by the loop after the class body).
    _FIELD_KEYS = (
        IN_PHY_PORT, METADATA, METADATA_MASK,
        ETH_SRC, ETH_SRC_MASK, ETH_DST, ETH_DST_MASK,
        VLAN_VID, VLAN_VID_MASK, VLAN_PCP, ETH_TYPE,
        IP_DSCP, IP_ECN, IP_PROTO,
        IPV4_SRC, IPV4_SRC_MASK, IPV4_DST, IPV4_DST_MASK,
        TCP_SRC, TCP_DST, UDP_SRC, UDP_DST, SCTP_SRC, SCTP_DST,
        ICMPV4_TYPE, ICMPV4_CODE,
        ARP_OP, ARP_SPA, ARP_SPA_MASK, ARP_TPA, ARP_TPA_MASK,
        ARP_SHA, ARP_SHA_MASK, ARP_THA, ARP_THA_MASK,
        IPV6_SRC, IPV6_SRC_MASK, IPV6_DST, IPV6_DST_MASK,
        IPV6_FLABEL, IPV6_FLABEL_MASK,
        ICMPV6_TYPE, ICMPV6_CODE,
        IPV6_ND_TARGET, IPV6_ND_SLL, IPV6_ND_TLL,
        MPLS_LABEL, MPLS_TC, MPLS_BOS,
        PBB_ISID, PBB_ISID_MASK,
        TUNNEL_ID, TUNNEL_ID_MASK,
        IPV6_EXTHDR, IPV6_EXTHDR_MASK,
    )

    def __init__(self, type_, in_node, in_port):
        """Delegate to BasicFlowMatch; match fields are set via properties."""
        super(OFPFlowMatch, self).__init__(type_, in_node, in_port)

    @classmethod
    def create_from_packed(cls, packed):
        """Rebuild an OFPFlowMatch from its packed dict representation.

        TYPE and IN_NODE must be present in *packed*; IN_PORT and every key
        in _FIELD_KEYS are optional and copied only when present.
        """
        flow = cls(packed[cls.TYPE], packed[cls.IN_NODE], packed.get(cls.IN_PORT))
        for key in cls._FIELD_KEYS:
            if key in packed:
                # Same effect as the property setter: store under the key.
                flow._body[key] = packed[key]
        return flow

    def packed_object(self):
        """Return the internal body dict (shared reference, not a copy)."""
        return self._body


# Install one read/write property per optional match field.  The attribute
# name is exactly the key string (e.g. OFPFlowMatch.in_phy_port proxies
# _body["in_phy_port"]), preserving the original hand-written interface.
for _key in OFPFlowMatch._FIELD_KEYS:
    setattr(OFPFlowMatch, _key, _match_field(_key))
del _key
|
|
from __future__ import absolute_import
from ...spec.base import NullContext
from ...scan import Dispatcher
from ...errs import SchemaError
from ...utils import scope_compose, get_or_none
from ...consts import private
from ...spec.v1_2.objects import (
ResourceList,
Resource,
Operation,
Authorization,
Parameter,
Model,
)
from ...spec.v2_0 import objects
import os
import six
def update_type_and_ref(dst, src, scope, sep, app):
    """Copy `$ref`/`type` information from a 1.2 object onto a 2.0 object.

    Primitive types are lowered onto 'type'; model references (explicit
    '$ref' or a non-primitive 'type') become scoped '#/definitions/' refs.
    """
    def _definition_ref(name):
        return '#/definitions/' + scope_compose(scope, name, sep=sep)

    explicit_ref = getattr(src, '$ref')
    if explicit_ref:
        dst.update_field('$ref', _definition_ref(explicit_ref))

    declared = getattr(src, 'type', None)
    if app.prim_factory.is_primitive(declared):
        dst.update_field('type', src.type.lower())
    elif src.type:
        dst.update_field('$ref', _definition_ref(src.type))
def convert_min_max(dst, src):
    """Convert string-valued minimum/maximum from a 1.2 object to numbers.

    Integer fields are parsed via float first (the 1.2 spec may say "1.0");
    falsy/absent values are recorded as None.  Raises SchemaError when a
    bound is present on a non-numeric type.
    """
    # Numeric parsers keyed by the declared primitive type.
    parsers = {'integer': lambda raw: int(float(raw)), 'number': float}

    for bound in ('minimum', 'maximum'):
        raw = getattr(src, bound, None)
        if not raw:
            dst.update_field(bound, None)
            continue
        parse = parsers.get(src.type)
        if parse is None:
            raise SchemaError('minimum/maximum is only allowed on integer/number, not {0}'.format(src.type))
        dst.update_field(bound, parse(raw))
def convert_schema_from_datatype(obj, scope, sep, app):
    """Convert a Swagger 1.2 DataType object into a 2.0 Schema object.

    :param obj: source 1.2 object (or None, in which case None is returned)
    :param scope: name scope used to qualify model references
    :param sep: separator joining scope segments
    :param app: application context providing the primitive-type factory
    :return: a populated objects.Schema, or None when obj is None
    """
    # Identity check, not equality: `== None` could invoke a custom __eq__
    # on spec objects and misfire (PEP 8 mandates `is None`).
    if obj is None:
        return None

    s = objects.Schema(NullContext())
    update_type_and_ref(s, obj, scope, sep, app)
    s.update_field('format', obj.format)
    # 'defaultValue' is optional in 1.2; only map it when explicitly set.
    if obj.is_set('defaultValue'):
        s.update_field('default', obj.defaultValue)
    convert_min_max(s, obj)
    s.update_field('uniqueItems', obj.uniqueItems)
    s.update_field('enum', obj.enum)
    # Container types carry an 'items' sub-object -> nested item schema.
    if obj.items:
        i = objects.Schema(NullContext())
        update_type_and_ref(i, obj.items, scope, sep, app)
        i.update_field('format', obj.items.format)
        s.update_field('items', i)
    return s
def convert_items(o, app):
    """Translate a 1.2 'items' entry into a 2.0 Items object.

    Only primitive types are legal here; a '$ref' or non-primitive type
    raises SchemaError.
    """
    converted = objects.Items(NullContext())

    if getattr(o, '$ref'):
        raise SchemaError('Can\'t have $ref for Items')

    declared_type = getattr(o, 'type', None)
    if not app.prim_factory.is_primitive(declared_type):
        raise SchemaError('Non primitive type is not allowed for Items')

    converted.update_field('type', o.type.lower())
    converted.update_field('format', o.format)
    return converted
class Upgrade(object):
    """Convert a Swagger 1.2 object tree into its 2.0 equivalent.

    Handlers registered on the internal dispatcher are invoked once per
    1.2 object type during traversal; they incrementally build a single
    2.0 Swagger object, retrieved afterwards via the ``swagger`` property.
    """
    # Dispatcher subclass used to register the per-type handlers below.
    class Disp(Dispatcher): pass
    def __init__(self, sep=private.SCOPE_SEPARATOR):
        # The 2.0 Swagger object under construction; stays None until a
        # ResourceList has been visited.
        self.__swagger = None
        # Separator used when composing scoped definition names.
        self.__sep = sep
    @Disp.register([ResourceList])
    def _resource_list(self, path, obj, app):
        # Create the root Swagger object together with Info/Contact/License.
        o = objects.Swagger(NullContext())
        # Info Object
        info = objects.Info(NullContext())
        info.update_field('version', obj.apiVersion)
        info.update_field('title', get_or_none(obj, 'info','title'))
        info.update_field('description', get_or_none(obj, 'info', 'description'))
        info.update_field('termsOfService', get_or_none(obj, 'info', 'termsOfServiceUrl'))
        # Contact Object (1.2 only carries a single contact string; it is
        # stored as the 2.0 contact email)
        if obj.info.contact:
            contact = objects.Contact(NullContext())
            contact.update_field('email', get_or_none(obj, 'info', 'contact'))
            info.update_field('contact', contact)
        # License Object
        if obj.info.license or obj.info.licenseUrl:
            license = objects.License(NullContext())
            license.update_field('name', get_or_none(obj, 'info', 'license'))
            license.update_field('url', get_or_none(obj, 'info', 'licenseUrl'))
            info.update_field('license', license)
        o.update_field('info', info)
        # Root-level defaults; host/basePath are filled in later by the
        # ``swagger`` property once all paths are known.
        o.update_field('swagger', '2.0')
        o.update_field('schemes', ['http', 'https'])
        o.update_field('host', '')
        o.update_field('basePath', '')
        o.update_field('tags', [])
        o.update_field('definitions', {})
        o.update_field('parameters', {})
        o.update_field('responses', {})
        o.update_field('paths', {})
        o.update_field('security', [])
        o.update_field('securityDefinitions', {})
        o.update_field('consumes', [])
        o.update_field('produces', [])
        self.__swagger = o
    @Disp.register([Resource])
    def _resource(self, path, obj, app):
        # Register one Tag per 1.2 Resource, skipping names already present.
        name = obj.get_name(path)
        for t in self.__swagger.tags:
            if t.name == name:
                break
        else:
            tt = objects.Tag(NullContext())
            tt.update_field('name', name)
            self.__swagger.tags.append(tt)
    @Disp.register([Operation])
    def _operation(self, path, obj, app):
        # Convert one 1.2 Operation into a 2.0 Operation attached to the
        # matching PathItem (created on demand).
        o = objects.Operation(NullContext())
        scope = obj._parent_.get_name(path)
        o.update_field('tags', [scope])
        o.update_field('operationId', obj.nickname)
        o.update_field('summary', obj.summary)
        o.update_field('description', obj.notes)
        o.update_field('deprecated', obj.deprecated == 'true')
        # consumes/produces fall back to the parent Resource when absent.
        c = obj.consumes if obj.consumes and len(obj.consumes) > 0 else obj._parent_.consumes
        o.update_field('consumes', c if c else [])
        p = obj.produces if obj.produces and len(obj.produces) > 0 else obj._parent_.produces
        o.update_field('produces', p if p else [])
        o.update_field('parameters', [])
        o.update_field('security', [])
        # if there is not authorizations in this operation,
        # looking for it in resource object.
        _auth = obj.authorizations if obj.authorizations and len(obj.authorizations) > 0 else obj._parent_.authorizations
        if _auth:
            for name, scopes in six.iteritems(_auth):
                o.security.append({name: [v.scope for v in scopes]})
        # Operation return value: mapped to the 'default' response.
        o.update_field('responses', {})
        resp = objects.Response(NullContext())
        if obj.type != 'void':
            resp.update_field('schema', convert_schema_from_datatype(obj, scope, self.__sep, app))
        o.responses['default'] = resp
        path = obj._parent_.basePath + obj.path
        if path not in self.__swagger.paths:
            self.__swagger.paths[path] = objects.PathItem(NullContext())
        method = obj.method.lower()
        self.__swagger.paths[path].update_field(method, o)
    @Disp.register([Authorization])
    def _authorization(self, path, obj, app):
        # Convert a 1.2 Authorization to a 2.0 SecurityScheme.
        o = objects.SecurityScheme(NullContext())
        if obj.type == 'basicAuth':
            o.update_field('type', 'basic')
        else:
            o.update_field('type', obj.type)
        o.update_field('scopes', {})
        for s in obj.scopes or []:
            o.scopes[s.scope] = s.description
        if o.type == 'oauth2':
            # Derive flow + endpoint URLs from whichever grant type is set.
            o.update_field('authorizationUrl', get_or_none(obj, 'grantTypes', 'implicit', 'loginEndpoint', 'url'))
            o.update_field('tokenUrl', get_or_none(obj, 'grantTypes', 'authorization_code', 'tokenEndpoint', 'url'))
            if o.authorizationUrl:
                o.update_field('flow', 'implicit')
            elif o.tokenUrl:
                o.update_field('flow', 'access_code')
        elif o.type == 'apiKey':
            o.update_field('name', obj.keyname)
            o.update_field('in', obj.passAs)
        self.__swagger.securityDefinitions[obj.get_name(path)] = o
    @Disp.register([Parameter])
    def _parameter(self, path, obj, app):
        # Convert a 1.2 Parameter and append it to the already-converted
        # Operation on the corresponding PathItem.
        o = objects.Parameter(NullContext())
        scope = obj._parent_._parent_.get_name(path)
        o.update_field('name', obj.name)
        o.update_field('required', obj.required)
        o.update_field('description', obj.description)
        # 1.2 'form' maps to 2.0 'formData'; other locations carry over.
        if obj.paramType == 'form':
            o.update_field('in', 'formData')
        else:
            o.update_field('in', obj.paramType)
        if 'body' == getattr(o, 'in'):
            o.update_field('schema', convert_schema_from_datatype(obj, scope, self.__sep, app))
        else:
            if getattr(obj, '$ref'):
                raise SchemaError('Can\'t have $ref in non-body Parameters')
            # allowMultiple without explicit items -> array of this type.
            if obj.allowMultiple == True and obj.items == None:
                o.update_field('type', 'array')
                o.update_field('collectionFormat', 'csv')
                o.update_field('uniqueItems', obj.uniqueItems)
                o.update_field('items', convert_items(obj, app))
                if obj.is_set("defaultValue"):
                    o.update_field('default', [obj.defaultValue])
                o.items.update_field('enum', obj.enum)
            else:
                o.update_field('type', obj.type.lower())
                o.update_field('format', obj.format)
                if obj.is_set("defaultValue"):
                    o.update_field('default', obj.defaultValue)
                convert_min_max(o, obj)
                o.update_field('enum', obj.enum)
            if obj.items:
                o.update_field('collectionFormat', 'csv')
                o.update_field('uniqueItems', obj.uniqueItems)
                o.update_field('items', convert_items(obj.items, app))
        path = obj._parent_._parent_.basePath + obj._parent_.path
        method = obj._parent_.method.lower()
        op = getattr(self.__swagger.paths[path], method)
        op.parameters.append(o)
    @Disp.register([Model])
    def _model(self, path, obj, app):
        # Convert a 1.2 Model into a (possibly pre-created) 2.0 Schema in
        # #/definitions, wiring up subTypes via allOf back-references.
        scope = obj._parent_.get_name(path)
        s = scope_compose(scope, obj.get_name(path), sep=self.__sep)
        o = self.__swagger.definitions.get(s, None)
        if not o:
            o = objects.Schema(NullContext())
            self.__swagger.definitions[s] = o
        props = {}
        for name, prop in six.iteritems(obj.properties):
            props[name] = convert_schema_from_datatype(prop, scope, self.__sep, app)
            props[name].update_field('description', prop.description)
        o.update_field('properties', props)
        o.update_field('required', obj.required)
        o.update_field('discriminator', obj.discriminator)
        o.update_field('description', obj.description)
        for t in obj.subTypes or []:
            # here we assume those child models belongs to
            # the same resource.
            sub_s = scope_compose(scope, t, sep=self.__sep)
            sub_o = self.__swagger.definitions.get(sub_s, None)
            if not sub_o:
                sub_o = objects.Schema(NullContext())
                self.__swagger.definitions[sub_s] = sub_o
            new_ref = objects.Schema(NullContext())
            new_ref.update_field('$ref', '#/definitions/' + s)
            sub_o.allOf.append(new_ref)
    @property
    def swagger(self):
        """Finalize and return the converted Swagger object.

        Splits the common URL prefix of all paths into Swagger.host and
        strips it from the path keys before returning.
        """
        # prepare Swagger.host & Swagger.basePath
        if not self.__swagger:
            return None
        common_path = os.path.commonprefix(list(self.__swagger.paths))
        # remove trailing slash,
        # because all paths in the Paths Object are prefixed with a slash.
        # NOTE(review): raises IndexError if common_path is '' while paths
        # is non-empty — TODO confirm whether that can occur upstream.
        common_path = common_path[:-1] if common_path[-1] == '/' else common_path
        if len(common_path) > 0:
            p = six.moves.urllib.parse.urlparse(common_path)
            self.__swagger.update_field('host', p.netloc)
            # Strip '<scheme>://<netloc>' from every path key.
            new_common_path = six.moves.urllib.parse.urlunparse((
                p.scheme, p.netloc, '', '', '', ''))
            new_path = {}
            for k in self.__swagger.paths.keys():
                new_path[k[len(new_common_path):]] = self.__swagger.paths[k]
            self.__swagger.update_field('paths', new_path)
        return self.__swagger
|
|
# Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for iRMC Management Driver
"""
import os
import xml.etree.ElementTree as ET
import mock
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules.irmc import management as irmc_management
from ironic.drivers import utils as driver_utils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.drivers import third_party_driver_mock_specs as mock_specs
from ironic.tests.objects import utils as obj_utils
# Canned iRMC driver_info used to build the test node in setUp().
INFO_DICT = db_utils.get_test_irmc_info()
class IRMCManagementTestCase(db_base.DbTestCase):
    """Unit tests for the iRMC management driver interface."""
    def setUp(self):
        # Build a fake_irmc node with canned driver_info and parse it once.
        super(IRMCManagementTestCase, self).setUp()
        driver_info = INFO_DICT
        mgr_utils.mock_the_extension_manager(driver="fake_irmc")
        self.driver = driver_factory.get_driver("fake_irmc")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_irmc',
                                               driver_info=driver_info)
        self.info = irmc_common.parse_driver_info(self.node)
    def test_get_properties(self):
        # NOTE(review): update() mutates the shared irmc_common module dict
        # in place rather than a copy — confirm this doesn't leak between
        # tests.
        expected = irmc_common.COMMON_PROPERTIES
        expected.update(ipmitool.COMMON_PROPERTIES)
        expected.update(ipmitool.CONSOLE_PROPERTIES)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.get_properties())
    @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate(self, mock_drvinfo):
        # validate() should simply delegate to parse_driver_info.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.management.validate(task)
            mock_drvinfo.assert_called_once_with(task.node)
    @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate_fail(self, mock_drvinfo):
        # Errors from parse_driver_info must propagate out of validate().
        side_effect = iter([exception.InvalidParameterValue("Invalid Input")])
        mock_drvinfo.side_effect = side_effect
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.management.validate,
                              task)
    def test_management_interface_get_supported_boot_devices(self):
        with task_manager.acquire(self.context, self.node.uuid) as task:
            expected = [boot_devices.PXE, boot_devices.DISK,
                        boot_devices.CDROM, boot_devices.BIOS,
                        boot_devices.SAFE]
            self.assertEqual(sorted(expected), sorted(task.driver.management.
                             get_supported_boot_devices(task)))
    @mock.patch.object(ipmitool.IPMIManagement, 'set_boot_device',
                       spec_set=True, autospec=True)
    def test_management_interface_set_boot_device_no_mode_ok(
            self,
            set_boot_device_mock):
        """no boot mode specified."""
        # Without a boot_mode capability, falls through to the IPMI path.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, boot_devices.PXE)
            set_boot_device_mock.assert_called_once_with(
                task.driver.management, task,
                boot_devices.PXE,
                False)
    @mock.patch.object(ipmitool.IPMIManagement, 'set_boot_device',
                       spec_set=True, autospec=True)
    def test_management_interface_set_boot_device_bios_ok(
            self,
            set_boot_device_mock):
        """bios mode specified."""
        # bios mode also delegates to the IPMI implementation.
        with task_manager.acquire(self.context, self.node.uuid) as task:
            driver_utils.add_node_capability(task, 'boot_mode', 'bios')
            task.driver.management.set_boot_device(task, boot_devices.PXE)
            set_boot_device_mock.assert_called_once_with(
                task.driver.management, task,
                boot_devices.PXE,
                False)
    @mock.patch.object(irmc_management.ipmitool, "send_raw", spec_set=True,
                       autospec=True)
    def _test_management_interface_set_boot_device_uefi_ok(self, params,
                                                           expected_raw_code,
                                                           send_raw_mock):
        # Shared helper: in uefi mode the driver issues two raw IPMI
        # commands; the second one encodes device + persistence.
        send_raw_mock.return_value = [None, None]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = ''
            driver_utils.add_node_capability(task, 'boot_mode', 'uefi')
            self.driver.management.set_boot_device(task, **params)
            send_raw_mock.assert_has_calls([
                mock.call(task, "0x00 0x08 0x03 0x08"),
                mock.call(task, expected_raw_code)])
    def test_management_interface_set_boot_device_uefi_ok_pxe(self):
        # 0xa0 = one-time boot; 0xe0 = persistent.
        params = {'device': boot_devices.PXE, 'persistent': False}
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xa0 0x04 0x00 0x00 0x00")
        params['persistent'] = True
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xe0 0x04 0x00 0x00 0x00")
    def test_management_interface_set_boot_device_uefi_ok_disk(self):
        params = {'device': boot_devices.DISK, 'persistent': False}
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xa0 0x08 0x00 0x00 0x00")
        params['persistent'] = True
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xe0 0x08 0x00 0x00 0x00")
    def test_management_interface_set_boot_device_uefi_ok_cdrom(self):
        params = {'device': boot_devices.CDROM, 'persistent': False}
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xa0 0x14 0x00 0x00 0x00")
        params['persistent'] = True
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xe0 0x14 0x00 0x00 0x00")
    def test_management_interface_set_boot_device_uefi_ok_bios(self):
        params = {'device': boot_devices.BIOS, 'persistent': False}
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xa0 0x18 0x00 0x00 0x00")
        params['persistent'] = True
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xe0 0x18 0x00 0x00 0x00")
    def test_management_interface_set_boot_device_uefi_ok_safe(self):
        params = {'device': boot_devices.SAFE, 'persistent': False}
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xa0 0x0c 0x00 0x00 0x00")
        params['persistent'] = True
        self._test_management_interface_set_boot_device_uefi_ok(
            params,
            "0x00 0x08 0x05 0xe0 0x0c 0x00 0x00 0x00")
    @mock.patch.object(irmc_management.ipmitool, "send_raw", spec_set=True,
                       autospec=True)
    def test_management_interface_set_boot_device_uefi_ng(self,
                                                          send_raw_mock):
        """uefi mode, next boot only, unknown device."""
        send_raw_mock.return_value = [None, None]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            driver_utils.add_node_capability(task, 'boot_mode', 'uefi')
            self.assertRaises(exception.InvalidParameterValue,
                              self.driver.management.set_boot_device,
                              task,
                              "unknown")
    @mock.patch.object(irmc_management, 'scci',
                       spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
    @mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
                       autospec=True)
    def test_management_interface_get_sensors_data_scci_ok(
            self, mock_get_irmc_report, mock_scci):
        """'irmc_sensor_method' = 'scci' specified and OK data."""
        # Feed a canned XML report and check the parsed sensor dictionary.
        with open(os.path.join(os.path.dirname(__file__),
                               'fake_sensors_data_ok.xml'), "r") as report:
            fake_txt = report.read()
        fake_xml = ET.fromstring(fake_txt)
        mock_get_irmc_report.return_value = fake_xml
        mock_scci.get_sensor_data.return_value = fake_xml.find(
            "./System/SensorDataRecords")
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.driver_info['irmc_sensor_method'] = 'scci'
            sensor_dict = self.driver.management.get_sensors_data(task)
        expected = {
            'Fan (4)': {
                'FAN1 SYS (29)': {
                    'Units': 'RPM',
                    'Sensor ID': 'FAN1 SYS (29)',
                    'Sensor Reading': '600 RPM'
                },
                'FAN2 SYS (29)': {
                    'Units': 'None',
                    'Sensor ID': 'FAN2 SYS (29)',
                    'Sensor Reading': 'None None'
                }
            },
            'Temperature (1)': {
                'Systemboard 1 (7)': {
                    'Units': 'degree C',
                    'Sensor ID': 'Systemboard 1 (7)',
                    'Sensor Reading': '80 degree C'
                },
                'Ambient (55)': {
                    'Units': 'degree C',
                    'Sensor ID': 'Ambient (55)',
                    'Sensor Reading': '42 degree C'
                }
            }
        }
        self.assertEqual(expected, sensor_dict)
    @mock.patch.object(irmc_management, 'scci',
                       spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
    @mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
                       autospec=True)
    def test_management_interface_get_sensors_data_scci_ng(
            self, mock_get_irmc_report, mock_scci):
        """'irmc_sensor_method' = 'scci' specified and NG data."""
        # Malformed sensor records should yield an empty dictionary.
        with open(os.path.join(os.path.dirname(__file__),
                               'fake_sensors_data_ng.xml'), "r") as report:
            fake_txt = report.read()
        fake_xml = ET.fromstring(fake_txt)
        mock_get_irmc_report.return_value = fake_xml
        mock_scci.get_sensor_data.return_value = fake_xml.find(
            "./System/SensorDataRecords")
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.driver_info['irmc_sensor_method'] = 'scci'
            sensor_dict = self.driver.management.get_sensors_data(task)
        self.assertEqual(len(sensor_dict), 0)
    @mock.patch.object(ipmitool.IPMIManagement, 'get_sensors_data',
                       spec_set=True, autospec=True)
    def test_management_interface_get_sensors_data_ipmitool_ok(
            self,
            get_sensors_data_mock):
        """'irmc_sensor_method' = 'ipmitool' specified."""
        # ipmitool method simply delegates to the IPMI implementation.
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.driver_info['irmc_sensor_method'] = 'ipmitool'
            task.driver.management.get_sensors_data(task)
            get_sensors_data_mock.assert_called_once_with(
                task.driver.management, task)
    @mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
                       autospec=True)
    def test_management_interface_get_sensors_data_exception(
            self,
            get_irmc_report_mock):
        """'FailedToGetSensorData Exception."""
        # Any failure fetching the report is wrapped in FailedToGetSensorData.
        get_irmc_report_mock.side_effect = exception.InvalidParameterValue(
            "Fake Error")
        irmc_management.scci.SCCIInvalidInputError = Exception
        irmc_management.scci.SCCIClientError = Exception
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.driver_info['irmc_sensor_method'] = 'scci'
            e = self.assertRaises(exception.FailedToGetSensorData,
                                  self.driver.management.get_sensors_data,
                                  task)
        self.assertEqual("Failed to get sensor data for node 1be26c0b-" +
                         "03f2-4d2e-ae87-c02d7f33c123. Error: Fake Error",
                         str(e))
|
|
#!/usr/bin/env python3
# import rospkg
import cv2
import numpy as np
import random
# rospack = rospkg.RosPack()
# pkg_path = rospack.get_path('stopsign')
# Hard-coded package root (rospkg lookup is commented out above).
pkg_path = '/home/buck/ros_ws/src/stopsign'
IMAGE_RATE = 11  # hz
# CSV file that receives the hand-labelled feature vectors.
EXACT_FILE = '%s/data/013_extra_man_labels/all_500.csv' % (pkg_path,)
# Inclusive/exclusive id ranges sampled by the labelling loop below.
start_image_id = 1
end_image_id = 1093
start_video_id = 1
end_video_id = 25
# Input frame path template: (video_id, image_id) -> jpg path.
IMAGE_BASE_STRING = '%s/data/011_new_tests/%s' % (pkg_path, '%02d/frame%04d.jpg')
# Output path template for annotated preview frames.
OUT_BASE_STRING = '%s/data/013_extra_man_labels/500/%s' % (pkg_path, 'frame_%02d_%04d.jpg')
def get_image(video_id, image_id):
    """Load frame *image_id* of video *video_id* as a BGR image (None if missing)."""
    frame_path = IMAGE_BASE_STRING % (video_id, image_id,)
    return cv2.imread(frame_path, cv2.IMREAD_COLOR)
def set_image(img, video_id, image_id):
    """Write *img* to the labelled-output path for (video_id, image_id)."""
    out_path = OUT_BASE_STRING % (video_id, image_id,)
    cv2.imwrite(out_path, img)
def flatten_kp(kp):
    """Pack a cv2.KeyPoint into a 7-element float64 vector.

    Angle and response are scaled (x1000 and x1e8) so they survive the
    fixed-width numeric formatting used when rows are written out.
    """
    return np.array([
        kp.angle * 1000,
        kp.class_id,
        kp.octave,
        kp.pt[0],
        kp.pt[1],
        kp.response * 100000000,
        kp.size,
    ], dtype=np.float64)
# Mutable selection rectangle (updated by the mouse callback) and the
# derived octagon contour used to test which keypoints fall inside the
# stopsign region.
minx = 0
miny = 0
maxx = 10000
maxy = 10000
contour = []
def rebuild_contour():
    """Recompute the 8-point octagonal contour from the current bbox."""
    global minx, miny, maxx, maxy, contour
    lo_w, hi_w = 2.0/3, 1.0/3
    xs = (minx,
          int(lo_w * minx + hi_w * maxx),
          int(hi_w * minx + lo_w * maxx),
          maxx)
    ys = (miny,
          int(lo_w * miny + hi_w * maxy),
          int(hi_w * miny + lo_w * maxy),
          maxy)
    contour = np.array([[xs[1], ys[0]], [xs[2], ys[0]], [xs[3], ys[1]], [xs[3], ys[2]],
                        [xs[2], ys[3]], [xs[1], ys[3]], [xs[0], ys[2]], [xs[0], ys[1]]], np.int32)
rebuild_contour()
def click_and_crop(event, x, y, flags, param):
    """OpenCV mouse callback.

    Button press records the top-left corner of the selection; button
    release records the bottom-right corner and rebuilds the contour.
    """
    global minx, miny, maxx, maxy
    if event == cv2.EVENT_LBUTTONDOWN:
        minx, miny = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        maxx, maxy = x, y
        rebuild_contour()
def kp_des2vector(klass, image_id, kp, des):
    """Assemble one training row.

    Layout: [class*1000 | 32 descriptor values | 7 keypoint fields | image_id].
    """
    vector = np.zeros((32 + 7 + 1 + 1,))
    vector[0] = klass * 1000
    vector[1:33] = des
    vector[33:40] = flatten_kp(kp)
    vector[40] = image_id
    return vector
def hand_label_image(img, video_id, image_id):
    """Interactively label ORB keypoints of one frame as stopsign / not.

    Shows the frame, lets the user mark it as containing a stopsign ('s')
    or not ('n'), then (for stopsigns) refine the bounding region with the
    mouse.  Returns (list of feature vectors, bool has_stopsign).
    """
    global minx, miny, maxx, maxy, contour
    results = []
    height, width, channels = img.shape
    area = height*width
    # Scale the keypoint budget with frame area (1000 features at 640x480).
    num_features = int((1000.0 * area)/ (640 * 480))
    # Initiate STAR detector
    orb = cv2.ORB_create(nfeatures = num_features)
    # find the keypoints with ORB
    kp = orb.detect(img,None)
    # compute the descriptors with ORB
    kp, des = orb.compute(img, kp)
    print('=====\npreview %04d, %04d\n' % (video_id, image_id,))
    print('s -> image has a stopsign.\nUse mouse to select stopsign.')
    print('\nOR\n')
    print('n -> image does not have a stopsign')
    print('---')
    cv2.imshow('preview', img)
    cv2.setMouseCallback('preview', click_and_crop)
    val = cv2.waitKey(0) % 256
    test_kp = val == ord('s')
    cv2.destroyAllWindows()
    if test_kp:
        # Up to 20 rounds of refining the selection polygon with the mouse.
        for i in range(20):
            print('s -> accept polyline as region\n\nOR\n')
            print('Use mouse to reselect the region')
            print('n -> refresh polyline as region')
            print('---')
            imgur = img.copy()
            # Only keypoints inside the current contour are highlighted.
            short_kp = list(filter(lambda x: cv2.pointPolygonTest(contour, x.pt, False) >= 0, kp))
            imgur = cv2.drawKeypoints(
                image=img,
                keypoints=short_kp,
                outImage=imgur,
                color=(0,255,0),
                flags=int(cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG))
            cv2.polylines(imgur, [contour], True, (79*i % 255, 0, 255))
            cv2.imshow('preview %d %d' % (video_id, image_id,), imgur)
            cv2.setMouseCallback('preview %d %d' % (video_id, image_id,), click_and_crop)
            val = cv2.waitKey(0) % 256
            if val == ord('s'):
                # Accepted: save the annotated preview and stop refining.
                set_image(imgur, video_id, image_id)
                break
            cv2.destroyAllWindows()
    # Emit one vector per keypoint; class 1 only for keypoints inside the
    # accepted stopsign contour.
    for index, keypoint in enumerate(kp):
        descriptor = des[index]
        if test_kp:
            skip_because_of_radius = cv2.pointPolygonTest(contour, kp[index].pt, False) < 0
            if not skip_because_of_radius:
                # img2 = cv2.drawKeypoints(
                #     image=img,
                #     kp=[kp[index]],
                #     color=(0,255,0),
                #     flags=4)
                if val == ord('s'):
                    klass = 1
                elif val == ord('n'):
                    klass = 0
                else:
                    cv2.destroyAllWindows()
                    raise NotImplementedError('Use s or n please.')
            else:
                klass = 0
        else:
            klass = 0
        vector = kp_des2vector(klass, image_id, kp[index], descriptor)
        results.append(vector)
    cv2.destroyAllWindows()
    # Reset the selection rectangle for the next frame.
    minx = 0
    miny = 0
    maxx = 10000
    maxy = 10000
    return results, test_kp
def extend_file(file, new_vectors):
    """Append one CSV line per vector, each number formatted '%7.2f'."""
    for vec in new_vectors:
        line = ','.join('%7.2f' % num for num in vec)
        file.write(line + '\n')
def expand_to_string(new_vectors):
    """Yield one CSV line per vector, each number formatted '%7d'."""
    for vec in new_vectors:
        yield ','.join('%7d' % num for num in vec)
### Begin the whole process ###
# Generate the first line from data
line0 = []
line0.append('class'.ljust(7))
for i in range(32):
    line0.append('descr%02d' % (i,))
# line0.extend(['Keypoint Angle', 'Keypoint Class Id', 'Keypoint Octave', 'Keypoint X', 'Keypoint Y', 'Keypoint Response x 10^6', 'Keypoint Size'])
line0.extend(['angle'.ljust(7), 'classid', 'octave'.ljust(7), 'x'.ljust(7), 'y'.ljust(7), 'respons', 'size'.ljust(7)])
line0.append('imageid')
# CSV header matching the column layout produced by kp_des2vector.
line0 = ','.join(line0)
exact_lines = [line0]
# Label all images before first stopsign as not-stopsign
# print('Prefilling data')
# for auto_image_id in range(start_image_id):
#     if auto_image_id % 100 == 0:
#         print('%d / %d' % (auto_image_id, start_image_id,))
#     new_vectors = auto_label_image(auto_image_id, 0)
#     exact_lines.extend(expand_to_string(new_vectors))
print('Done Prefilling Data')
# Hand label sampled images and auto fill the rest
# Fixed seed so the same 500 frames are sampled on every run.
random.seed(8675309)
# label 100 random images from the dataset
# TODO(buckbaskin) explore changing kp parameters because stopsigns showing w/o
#   keypoints even on large stopsigns
for _ in range(500):
    # Re-sample until a frame that actually exists on disk is found.
    video_id = random.randrange(start_video_id, end_video_id)
    image_id = random.randrange(start_image_id, end_image_id)
    img = get_image(video_id, image_id)
    while(img is None):
        video_id = random.randrange(start_video_id, end_video_id)
        image_id = random.randrange(start_image_id, end_image_id)
        img = get_image(video_id, image_id)
    new_vectors, is_stopsign = hand_label_image(img, video_id, image_id)
    exact_lines.extend(expand_to_string(new_vectors))
print('Write to EXACT_FILE')
print(EXACT_FILE)
with open(EXACT_FILE, 'w') as f:
    for line in exact_lines:
        f.write('%s\n' % (line,))
|
|
#!/usr/bin/python
from __future__ import print_function
from contextlib import closing
import threading
import configparser
import re
import getopt
import time
import pymysql
import sys
import traceback
from geoInfo.MaxMindRepo import MaxMindRepo
def usage(msg="Usage"):
    """Print *msg* plus the CLI synopsis, then exit with status 2."""
    print(msg)
    synopsis = 'python3 ' + sys.argv[0] + ' -d RIBS_LOCATION [-h]'
    print(synopsis)
    sys.exit(2)
def current_time():
    """Return (epoch seconds as int, local time as 'YYYY-MM-DD HH:MM:SS')."""
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    return int(time.time()), stamp
class Logger():
    """Append-only log writer whose writes are serialized by an RLock."""
    def __init__(self, logfilename):
        self.lock = threading.RLock()
        self.logfilename = logfilename
    def print_log(self, msg):
        # Hold the lock for the whole open/write/close cycle so that
        # concurrent threads cannot interleave log lines.
        with self.lock:
            logfile = open(self.logfilename, 'a+')
            _, localtime = current_time()
            stamp = '[' + localtime + '] '
            print(stamp, 'INFO:', msg, file=logfile)
            logfile.close()
def dbpush_asn_geo(db, asn, location):
    """Insert or update the ASNGeo row for (*asn*, global geoDate).

    *location* is a set of country codes; countries already stored for
    this ASN/date are merged into it before the row is written back.
    Raises Exception('Insert to ASNGeo Failed') on any DB error.
    """
    #print("In dbpush_asn_geo")
    with closing( db.cursor() ) as cur:
        try:
            # Parameterized queries avoid quoting bugs / SQL injection.
            cur.execute("select ASNLocation from ASNGeo where ASN = %s and GeoDate = %s",
                        (asn, geoDate))
            row = cur.fetchone()
            if row is not None:  # We have seen this ASN-GeoDate before
                countries = re.sub('[{}]', '', row[0])
                for entry in countries.split(','):
                    et = re.sub('[\"|\'| ]', '', entry)
                    location.add(et)
                #print(str(location))
                # BUG FIX: the previous UPDATE filtered only on GeoDate and
                # therefore overwrote the location of EVERY ASN for that
                # date; ASN must be part of the WHERE clause.
                cur.execute("update ASNGeo set ASNLocation = %s where ASN = %s and GeoDate = %s",
                            (str(location), asn, geoDate))
            else:
                cur.execute("insert into ASNGeo(GeoDate,ASN,ASNLocation) values (%s,%s,%s)",
                            (geoDate, asn, str(location)))
        except Exception as exc:
            # Chain the cause so the original DB error is not lost.
            raise Exception('Insert to ASNGeo Failed') from exc
def query_get_distinct_asn(db):
    """Return the distinct OriginAS values for the global geoDate.

    AS sets (values containing '{'/'}') are skipped.  Returns False when
    nothing was found.
    NOTE(review): the caller iterates the result directly; a False return
    would raise TypeError there — confirm the empty case cannot occur.
    """
    with closing( db.cursor() ) as cur:
        toReturn=[]
        try:
            cur.execute("SELECT distinct OriginAS FROM BGPPrefixGeo where GeoDate= '{0}'".format(geoDate))
            row=cur.fetchone()
            while row is not None:
                if '}' not in row[0] and '{' not in row[0]: #We will ignore Group of Origin ASes
                    toReturn.extend(row)
                row=cur.fetchone()
        except Exception:
            raise Exception('Select Query Distinct ASN Failed')
        if toReturn:
            return toReturn
        else:
            return False
def query_asn_locations(db,asn):
    """Collect the set of countries where *asn* is present.

    Combines: (1) PrefixLocation rows from BGPPrefixGeo for the global
    geoDate, (2) IXP presence, (3) PCH participant data, and
    (4) PeeringDB LAN IPs (read from 'asIXPLanIP.txt').  Newly added
    countries from sources 2-4 are also appended to audit files.
    """
    toReturn=set()
    with closing( db.cursor() ) as cur:
        try:
            cur.execute('SELECT distinct PrefixLocation FROM BGPPrefixGeo WHERE OriginAS = "{0}" and GeoDate="{1}"'.format(asn,geoDate))
            row=cur.fetchone()
            while row is not None:
                # Stored values look like "{'US', 'DE'}"; strip the braces
                # and per-entry quotes/spaces before adding.
                countries=re.sub('[{}]','',row[0])
                tmpSet=countries.split(',')
                #print("Row: "+row[0])
                #print("TMP: "+str(tmpSet))
                for entry in tmpSet:
                    if entry != "set()":
                        et=re.sub('[\"|\'| ]','',entry)
                        #print(entry,et)
                        toReturn.add(et)
                #print("Return: "+str(toReturn))
                row=cur.fetchone()
        except Exception:
            raise Exception('Select Query Failed')
    #Lookup presence in IXPs
    ixpDict=getIXPList(asn)
    ixpCountrySet=getCountriesFromIXPDict(ixpDict)
    if len(ixpCountrySet)>0:
        for ct in ixpCountrySet:
            if ct not in toReturn:
                toReturn.add(ct)
                # Audit trail of countries contributed by IXP data only.
                with closing(open('countriesAddedFromIXPData.txt','a+')) as asncountryFile:
                    print(asn+"|"+ct,file=asncountryFile)
    #Add countries from PCH data
    pchSetCountries=getPCHList(asn)
    #print(pchSetCountries)
    if len(pchSetCountries)>0:
        for pcountry in pchSetCountries:
            if pcountry not in toReturn:
                toReturn.add(pcountry)
                with closing(open('countriesAddedFromPCHData.txt','a+')) as pchcountryFile:
                    print(asn+"|"+pcountry,file=pchcountryFile)
    #Add countries from PeeringDB data
    # Map AS -> list of IXP LAN IPs, parsed from a '|'-separated dump.
    pdbMap={}
    with closing(open('asIXPLanIP.txt','r')) as ixpLANIPs:
        for line in ixpLANIPs:
            vals=line.split('|')
            # NOTE(review): eval() on file contents — trusted input assumed;
            # consider ast.literal_eval.
            pdbMap[vals[0]]=eval(str(vals[1]))
    pdbCountries=getPDBList(asn,pdbMap)
    if len(pdbCountries)>0:
        for pcountry in pdbCountries:
            if pcountry not in toReturn:
                toReturn.add(pcountry)
                with closing(open('countriesAddedFromPDBData.txt','a+')) as pdbcountryFile:
                    print(asn+"|"+pcountry,file=pdbcountryFile)
    return toReturn
def getProcessedASN():
    """Return the list of already-processed ASNs (one per line).

    Creates the tracking file on first run so later appends succeed.
    """
    try:
        # Probe for existence; close the handle instead of leaking it
        # (the original left both probe handles open).
        with open(list_of_already_processed_ASN):
            pass
    except OSError:
        with open(list_of_already_processed_ASN, 'a+'):
            pass
        logger.print_log(list_of_already_processed_ASN+' file created')
    with open(list_of_already_processed_ASN) as f:
        return f.read().splitlines()
def print_to_processed_list(ASN, _lock=threading.RLock()):
    """Append *ASN* to the processed-list file and the in-memory set.

    BUG FIX: the original created a brand-new RLock on every call, which
    cannot exclude anything.  The lock is now created once at function
    definition time (default argument) and shared by all callers; the
    added keyword parameter keeps the call signature backward compatible.
    """
    with _lock:
        with open(list_of_already_processed_ASN, 'a+') as f:
            print(ASN, file=f)
        #Also update the in-memory list of processed prefixes
        processedASN.add(ASN)
def getPDBList(AS, pdbMap):
    """Return the countries of all PeeringDB IXP LAN IPs recorded for *AS*."""
    countriesSet = set()
    for netip in pdbMap[AS]:
        # MaxMind lookup may return multiple countries per address.
        countriesSet.update(mm.ipToCountry(netip))
    return countriesSet
def getPCHList(AS):
    """Return countries of PCH participant IPs recorded for *AS*.

    Opens a fresh connection to the IXP database, geolocates each stored
    IP via MaxMind, and returns the resulting country set (empty on
    failure).
    """
    countries=set()
    db = pymysql.connect(host=config['IXPMySQL']['serverIP'],
                         port=int(config['IXPMySQL']['serverPort']),
                         user=config['IXPMySQL']['user'],
                         passwd=config['IXPMySQL']['password'],
                         db=config['IXPMySQL']['dbname'])
    with closing(db.cursor()) as cur:
        try:
            # NOTE(review): the leading space in " "+AS matches how ASN is
            # apparently stored in pch_participants_list — confirm.
            query = 'select IP from pch_participants_list where ASN = "{0}";'.format(" "+AS)
            cur.execute(query)
            row = cur.fetchone()
            while row is not None:
                # Stored IPs carry one leading character that is stripped.
                ip=row[0][1:]
                localCountrySet=mm.ipToCountry(ip)
                #print(ip,localCountrySet)
                for ct in localCountrySet:
                    countries.add(ct)
                row = cur.fetchone()
        except:
            traceback.print_exc()
            logger.print_log('IXP IP fetch from PCH failed!')
    db.close()
    return countries
def getIXPList(AS):
    """Return {ixp_id: details} for every IXP where *AS* participates.

    Details hold asn, shortName, name, city, country and continent as
    joined from the participants/ixps tables.  Empty dict on failure.
    """
    ixpDict={}
    db = pymysql.connect(host=config['IXPMySQL']['serverIP'],
                         port=int(config['IXPMySQL']['serverPort']),
                         user=config['IXPMySQL']['user'],
                         passwd=config['IXPMySQL']['password'],
                         db=config['IXPMySQL']['dbname'])
    with closing(db.cursor()) as cur:
        try:
            query = "SELECT p.ID,ASn,ShortName,Name,City,Country,Continent FROM participants p ,ixps i where p.ID=i.ID and ASn = '{0}'".format(AS)
            cur.execute(query)
            row = cur.fetchone()
            while row is not None:
                (ixpid,asn,shortName,name,city,country,continent)=row
                ixpDict[ixpid]={}
                ixpDict[ixpid]['asn']=asn
                ixpDict[ixpid]['shortName']=shortName
                ixpDict[ixpid]['name']=name
                ixpDict[ixpid]['city']=city
                ixpDict[ixpid]['country']=country
                ixpDict[ixpid]['continent']=continent
                row = cur.fetchone()
        except:
            logger.print_log('IXP fetch failed!')
    db.close()
    return ixpDict
def getCountriesFromIXPDict(ixpDict):
    """Return the set of countries across all IXP entries in *ixpDict*."""
    return {info['country'] for info in ixpDict.values()}
def runAnalysis():
    """Geolocate every distinct, not-yet-processed ASN and persist results.

    For each new ASN: collect its country set, upsert it into ASNGeo,
    record it as processed, and commit after each ASN.
    """
    ASN_List=query_get_distinct_asn(db)
    for ASN in ASN_List:
        if ASN not in processedASN:
            thisASLoc=set()
            thisASLoc=query_asn_locations(db,ASN)
            #print(ASN,str(thisASLoc))
            #if thisASLoc is False:
            #    print_to_processed_list(ASN)
            #else:
            dbpush_asn_geo(db,ASN,thisASLoc)
            print_to_processed_list(ASN)
            # Commit per ASN so a crash loses at most one ASN of work.
            db.commit()
if __name__ == "__main__":
    # Entry point: parse options, connect to MySQL, geolocate all ASNs,
    # and log the total runtime.
    start_time, _ = current_time()
    if sys.version_info < (3, 0):
        print("ERROR: Please use python3.")
        exit(0)
    isTest = False
    #geoDate="20160105"
    config = configparser.ConfigParser()
    config.read('./conf/mrt2db_geo.conf')
    config.sections()
    db = pymysql.connect(host=config['MySQL']['serverIP'],
                         port=int(config['MySQL']['serverPort']),
                         user=config['MySQL']['user'],
                         passwd=config['MySQL']['password'],
                         db=config['MySQL']['dbname'])
    logfilename = None
    # BUG FIX: these were never initialized, so the 'if not geoDate' check
    # below raised NameError whenever the option was omitted.
    geoDate = None
    maxmindFile = None
    try:
        # BUG FIX: '-g'/'-m' were handled below but missing from the getopt
        # spec, and value-taking long options need a trailing '='.
        opts, args = getopt.getopt(sys.argv[1:], 'l:g:m:h',
                                   ['logfile=', 'geodate=', 'maxmindfile=', 'help'])
    except getopt.GetoptError:
        usage('GetoptError: Arguments not correct')
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit(2)
        elif opt in ('-l', '--logfile'):
            logfilename = arg
        elif opt in ('-g', '--geodate'):
            geoDate = arg
        elif opt in ('-m', '--maxmindfile'):
            maxmindFile = arg
    if not geoDate:
        usage('Missing geoDate')
    if not maxmindFile:
        usage('Missing maxmind filename')
    #Logger
    if not logfilename:
        # Default log name: '<script basename>.log'.
        scriptname = sys.argv[0].split('.')
        logfilename = scriptname[0] + '.log'
    logger = Logger(logfilename)
    list_of_already_processed_ASN = "geo_processed_ASN.txt"
    processedASN = set(getProcessedASN())
    #mm = MaxMindRepo('/home3/akshah/akshah_cron_bin/latest_maxmind_bin')
    mm = MaxMindRepo(maxmindFile)
    runAnalysis()
    db.close()
    end_time, _ = current_time()
    logger.print_log('Finished processing in '+str(int((end_time-start_time)/60))+' minutes and '+str(int((end_time-start_time)%60))+' seconds.')
|
|
#!/usr/bin/env python
#
# Generated Fri Sep 23 15:48:06 2011 by generateDS.py version 2.6b.
#
import sys
import people_api as supermod
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse XML via whichever ElementTree implementation was imported."""
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        # lxml's ETCompatXMLParser mimics the stdlib ElementTree parser
        # (e.g. comments are ignored) so output matches across backends.
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class peopleTypeSub(supermod.peopleType):
    """Generated subclass of supermod.peopleType; customisations go here."""
    def __init__(self, comments=None, person=None, specialperson=None, programmer=None, python_programmer=None, java_programmer=None):
        super(peopleTypeSub, self).__init__(comments, person, specialperson, programmer, python_programmer, java_programmer, )
    def upcase_names(self):
        # Upper-case the name of every contained person (delegates per person).
        for person in self.get_person():
            person.upcase_names()
supermod.peopleType.subclass = peopleTypeSub
# end class peopleTypeSub
class commentsTypeSub(supermod.commentsType):
    """Generated subclass of supermod.commentsType (mixed-content element)."""
    def __init__(self, emp=None, bold=None, valueOf_=None, mixedclass_=None, content_=None):
        super(commentsTypeSub, self).__init__(emp, bold, valueOf_, mixedclass_, content_, )
supermod.commentsType.subclass = commentsTypeSub
# end class commentsTypeSub
class personTypeSub(supermod.personType):
    """Generated subclass of supermod.personType; customisations go here."""
    def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, range_=None, extensiontype_=None):
        super(personTypeSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, range_, extensiontype_, )
    def upcase_names(self):
        # Replace this person's name with its upper-cased form.
        self.set_name(self.get_name().upper())
supermod.personType.subclass = personTypeSub
# end class personTypeSub
class specialpersonSub(supermod.specialperson):
    """Generated subclass of supermod.specialperson; customisations go here."""
    def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, range_=None):
        super(specialpersonSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, range_, )
supermod.specialperson.subclass = specialpersonSub
# end class specialpersonSub
class programmerTypeSub(supermod.programmerType):
    """Generated subclass of supermod.programmerType; customisations go here."""
    def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, range_=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, extensiontype_=None):
        super(programmerTypeSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, range_, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, extensiontype_, )
supermod.programmerType.subclass = programmerTypeSub
# end class programmerTypeSub
class paramTypeSub(supermod.paramType):
    """Generated subclass of supermod.paramType; customisations go here."""
    def __init__(self, semantic=None, name=None, flow=None, sid=None, type_=None, id=None, valueOf_=None):
        super(paramTypeSub, self).__init__(semantic, name, flow, sid, type_, id, valueOf_, )
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
class python_programmerTypeSub(supermod.python_programmerType):
    """Generated subclass of supermod.python_programmerType; customisations go here."""
    def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, range_=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, nick_name=None, favorite_editor=None, flowvalue=None):
        super(python_programmerTypeSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, range_, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, nick_name, favorite_editor, flowvalue, )
supermod.python_programmerType.subclass = python_programmerTypeSub
# end class python_programmerTypeSub
class java_programmerTypeSub(supermod.java_programmerType):
    """Generated subclass of supermod.java_programmerType; customisations go here."""
    def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, range_=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, status=None, nick_name=None, favorite_editor=None):
        super(java_programmerTypeSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, range_, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, status, nick_name, favorite_editor, )
supermod.java_programmerType.subclass = java_programmerTypeSub
# end class java_programmerTypeSub
class agentTypeSub(supermod.agentType):
    """Generated subclass of supermod.agentType; customisations go here."""
    def __init__(self, firstname=None, lastname=None, priority=None, info=None, vehicle=None, extensiontype_=None):
        super(agentTypeSub, self).__init__(firstname, lastname, priority, info, vehicle, extensiontype_, )
supermod.agentType.subclass = agentTypeSub
# end class agentTypeSub
class special_agentTypeSub(supermod.special_agentType):
    """Generated subclass of supermod.special_agentType; customisations go here."""
    def __init__(self, firstname=None, lastname=None, priority=None, info=None, vehicle=None):
        super(special_agentTypeSub, self).__init__(firstname, lastname, priority, info, vehicle, )
supermod.special_agentType.subclass = special_agentTypeSub
# end class special_agentTypeSub
class weird_agentTypeSub(supermod.weird_agentType):
    """Generated subclass of supermod.weird_agentType; customisations go here."""
    def __init__(self, firstname=None, lastname=None, priority=None, info=None, vehicle=None):
        super(weird_agentTypeSub, self).__init__(firstname, lastname, priority, info, vehicle, )
supermod.weird_agentType.subclass = weird_agentTypeSub
# end class weird_agentTypeSub
class boosterTypeSub(supermod.boosterType):
    """Generated subclass of supermod.boosterType; customisations go here."""
    def __init__(self, member_id=None, firstname=None, lastname=None, other_name=None, classxx=None, other_value=None, type_=None, client_handler=None):
        super(boosterTypeSub, self).__init__(member_id, firstname, lastname, other_name, classxx, other_value, type_, client_handler, )
supermod.boosterType.subclass = boosterTypeSub
# end class boosterTypeSub
class infoTypeSub(supermod.infoType):
    """Generated subclass of supermod.infoType; customisations go here."""
    def __init__(self, rating=None, type_=None, name=None):
        super(infoTypeSub, self).__init__(rating, type_, name, )
supermod.infoType.subclass = infoTypeSub
# end class infoTypeSub
class vehicleTypeSub(supermod.vehicleType):
    """Generated subclass of supermod.vehicleType; customisations go here."""
    def __init__(self, wheelcount=None, extensiontype_=None):
        super(vehicleTypeSub, self).__init__(wheelcount, extensiontype_, )
supermod.vehicleType.subclass = vehicleTypeSub
# end class vehicleTypeSub
class automobileSub(supermod.automobile):
    """Generated subclass of supermod.automobile; customisations go here."""
    def __init__(self, wheelcount=None, drivername=None):
        super(automobileSub, self).__init__(wheelcount, drivername, )
supermod.automobile.subclass = automobileSub
# end class automobileSub
class airplaneSub(supermod.airplane):
    """Generated subclass of supermod.airplane; customisations go here."""
    def __init__(self, wheelcount=None, pilotname=None):
        super(airplaneSub, self).__init__(wheelcount, pilotname, )
supermod.airplane.subclass = airplaneSub
# end class airplaneSub
class client_handlerTypeSub(supermod.client_handlerType):
    """Generated subclass of supermod.client_handlerType; customisations go here."""
    def __init__(self, fullname=None, refid=None):
        super(client_handlerTypeSub, self).__init__(fullname, refid, )
supermod.client_handlerType.subclass = client_handlerTypeSub
# end class client_handlerTypeSub
def get_root_tag(node):
    """Return ``(tag_name, generated_class_or_None)`` for an element node."""
    tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
    # getattr with a default collapses the original hasattr/getattr pair.
    return tag, getattr(supermod, tag, None)
def parse(inFilename):
    """Parse *inFilename*, write the document as XML to stdout and return
    the root object. Falls back to ``supermod.peopleType`` when the root
    element is not a known generated class."""
    doc = parsexml_(inFilename)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'people'
        rootClass = supermod.peopleType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Drop the DOM so Python can reclaim its memory. (The original assigned
    # doc = None twice; once is enough.)
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_=rootTag,
        namespacedef_='')
    return rootObj
def parseString(inString):
    """Parse XML from *inString*, write it back to stdout and return the
    root object (falling back to ``supermod.peopleType`` if unrecognised)."""
    from StringIO import StringIO
    root_node = parsexml_(StringIO(inString)).getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'people'
        root_class = supermod.peopleType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # The parsed tree is no longer referenced past this point.
    sys.stdout.write('<?xml version="1.0" ?>\n')
    root_obj.export(sys.stdout, 0, name_=root_tag, namespacedef_='')
    return root_obj
def parseLiteral(inFilename):
    """Parse *inFilename* and print the root object as Python literal code."""
    tree = parsexml_(inFilename)
    node = tree.getroot()
    tag, cls = get_root_tag(node)
    if cls is None:
        tag = 'people'
        cls = supermod.peopleType
    obj = cls.factory()
    obj.build(node)
    # Release the DOM before emitting output.
    tree = None
    sys.stdout.write('#from ??? import *\n\n')
    sys.stdout.write('import ??? as model_\n\n')
    sys.stdout.write('rootObj = model_.people(\n')
    obj.exportLiteral(sys.stdout, 0, name_="people")
    sys.stdout.write(')\n')
    return obj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
    """Print command-line usage and exit with status 1."""
    # print() call form works on both Python 2 (single argument) and 3,
    # unlike the original py2-only 'print USAGE_TEXT' statement.
    print(USAGE_TEXT)
    sys.exit(1)
def main():
    """Entry point: parse the single input filename given on the command line."""
    cli_args = sys.argv[1:]
    if len(cli_args) != 1:
        usage()
    root = parse(cli_args[0])
if __name__ == '__main__':
    # Script entry point; uncomment the pdb line to debug interactively.
    #import pdb; pdb.set_trace()
    main()
|
|
'''
Created on 8 May 2019
@author: si
'''
from datetime import datetime
from multiprocessing import Process, Pipe
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from pi_fly.actional.abstract import CommsMessage
from pi_fly.actional.actional_management import build_actional_processes, governor_run_forever
from pi_fly.actional.dummy import DummyActional
from pi_fly.devices.dummy import DummyOutput
from pi_fly.model import Base, Event
from pi_fly.scoreboard import ScoreBoard
from pi_fly.test.test_base import BaseTest
class TestActionals(BaseTest):
    """Tests for actionals: the comms channel, scoreboard IO, output devices
    and running actionals under the governor process."""

    def test_communications_channel(self):
        """
        Send a command to the dummy actional and it replies saying the command was accepted.
        It then sends a log message as well.
        """
        # this bit will be done in the profile file because the actionals available are deployment
        # specific along with the sensors that the actional reads from and the devices it writes
        # to.
        a = DummyActional(name="fake_actional",
                          my_input="the_time",
                          my_output=DummyOutput(name="fake_output")
                          )
        # connecting the comms channel isn't done in the profile but by a controller that
        # also instantiates the actional into its own Process.
        parent_conn, child_conn = Pipe()
        a.set_comms_channel(child_conn)
        p = Process(target=a)
        p.start()
        parent_conn.send(CommsMessage(action="command", message="hello"))
        parent_conn.send(CommsMessage(action="command", message="terminate"))
        # Drain the pipe until the child terminates (fixed spelling of the
        # original 'comms_messages_recieved' local).
        comms_messages_received = []
        while True:
            if parent_conn.poll(0.1):
                comms_messages_received.append(parent_conn.recv())
            if not p.is_alive():
                break
        p.join()
        log_messages = [cm.message for cm in comms_messages_received]
        self.assertIn('hello command hello',
                      str(log_messages),
                      "DummyActional.run_command didn't run")
        self.assertIn('dummy actional (fake_actional) is running',
                      str(log_messages),
                      "DummyActional.actional_loop_actions fail")

    def test_scoreboard_io(self):
        """
        Input and output to the scoreboard
        """
        a = DummyActional(name="fake_actional",
                          my_input="the_time",
                          my_output=DummyOutput(name="fake_output")
                          )
        scoreboard = ScoreBoard()
        a.set_scoreboard(scoreboard)
        # put something onto the scoreboard for the dummy actional to read
        fake_sensor_output = {'sensor_id': None,
                              'value_type': "time",
                              'value_float': 42
                              }
        scoreboard.update_value('the_time', fake_sensor_output)
        # :method:`actional_loop_actions` is normally called once per loop.
        # The dummy actional looks for the scoreboard value for its 'my_input' and responds by
        # doubling the number it finds and putting this value back on the scoreboard under device
        # name 'actional_reply'
        a.actional_loop_actions()
        reply = scoreboard.get_current_value('actional_reply')
        self.assertEqual(84, reply)

    def test_output_device(self):
        """
        On receiving the pre-agreed magic number 123 from the input device, set the output device
        to True.
        """
        output = DummyOutput(name="fake_output",
                             set_state_on_start=False
                             )
        a = DummyActional(name="fake_actional",
                          my_input="the_time",
                          my_output=output
                          )
        scoreboard = ScoreBoard()
        a.set_scoreboard(scoreboard)
        fake_sensor_output = {'sensor_id': None,
                              'value_type': "time",
                              'value_float': 123
                              }
        scoreboard.update_value('the_time', fake_sensor_output)
        self.assertFalse(output.state)
        a.actional_loop_actions()
        self.assertTrue(output.state)

    def test_build_actional_processes_nothing(self):
        # With no actionals configured the build yields an empty mapping.
        scoreboard = ScoreBoard()
        self.profile.ACTIONALS = []
        actional_details = build_actional_processes(self.profile, scoreboard)
        self.assertEqual({}, actional_details)

    def test_build_actional_processes(self):
        # The test profile declares two actionals.
        scoreboard = ScoreBoard()
        actional_details = build_actional_processes(self.profile, scoreboard)
        self.assertEqual(2, len(actional_details))

    def test_pipe_on_the_scoreboard(self):
        """
        The scoreboard is a grouping of shared variables and uses :class:`multiprocessing.Manager`
        to keep access thread/process safe. Can pipes be shared?!
        """
        scoreboard = ScoreBoard()
        parent_conn, child_conn = Pipe()
        scoreboard.update_value('message_pipe', parent_conn)
        child_conn.send("hello pipe!")
        pipe_from_scoreboard = scoreboard.get_current_value('message_pipe')
        received_value = pipe_from_scoreboard.recv()
        self.assertEqual("hello pipe!", received_value)

    def build_governor(self, scoreboard, expected_log_message, messages_to_send):
        """
        Start all profiled actionals plus a governor process, send
        *messages_to_send* to fake_actional_0 and collect log messages.

        Args:
            expected_log_message (str) - finish immediately when this message is received.
            messages_to_send (list of CommsMessage) to send to fake_actional_0

        Returns:
            list of str log messages received before *expected_log_message*
            arrived or the message cap was hit.
        """
        actional_details = build_actional_processes(self.profile, scoreboard)
        ac_names = []
        proc_table = []
        for ac_name, ac_parts in actional_details.items():
            # can't pickle a process so just put the comms part on
            scoreboard.update_value(ac_name, {'comms': ac_parts['comms']})
            ac_names.append(ac_name)
            proc_table.append(ac_parts['process'])
            ac_parts['process'].start()
        logging_parent, logging_child = Pipe()
        governor_kwargs = {'scoreboard': scoreboard,
                           'actional_names': ac_names,
                           'profile': self.profile,
                           'logging_pipe': logging_child
                           }
        p = Process(target=governor_run_forever, kwargs=governor_kwargs)
        proc_table.append(p)
        p.start()
        # known name from profile of a DummyActional
        pipe_from_scoreboard = scoreboard.get_current_value('fake_actional_0')['comms']
        for comms_message in messages_to_send:
            pipe_from_scoreboard.send(comms_message)
        msgs = []
        max_messages = 20  # more than this means fail
        while logging_parent.poll(1):
            log_msg = logging_parent.recv()
            self.assertIsInstance(log_msg, CommsMessage)
            msg = log_msg.message[0]
            msgs.append(msg)
            if expected_log_message in msg:
                break
            if len(msgs) >= max_messages:
                break
        # Tear everything down however far the run got.
        for p in proc_table:
            p.terminate()
        for p in proc_table:
            if p.is_alive():
                p.join()
        return msgs

    def test_governor_run_forever(self):
        """
        Read log messages from actionals that are being run by the governor.
        """
        scoreboard = ScoreBoard()
        expected_log_message = 'hello command hello'
        test_command = [CommsMessage(action="command", message="hello"), ]
        log_msgs = self.build_governor(scoreboard,
                                       expected_log_message=expected_log_message,
                                       messages_to_send=test_command
                                       )
        self.assertIn(expected_log_message, "".join(log_msgs), "Couldn't find expected log message")

    def test_available_commands(self):
        output = DummyOutput(name="fake_output",
                             set_state_on_start=False
                             )
        a = DummyActional(name="fake_actional",
                          my_input="the_time",
                          my_output=output
                          )
        self.assertEqual(2, len(a.available_commands))
        dummy_actional_commands = set([ax.command for ax in a.available_commands])
        self.assertEqual(set(['hello', 'send_event']), dummy_actional_commands)

    def test_event(self):
        """
        Test the dummy actional can send an event that would get back to the governor and then on to database.
        """
        # database is normally created by the DatabaseStoragePollingLoop and web view
        # consumes from this db. But this unit test shouldn't be concerned with
        # DatabaseStoragePollingLoop so doing own create.
        engine = create_engine(self.profile.SQLALCHEMY_DATABASE_URI)
        Base.metadata.create_all(engine)
        DBSession = sessionmaker(bind=engine)
        db_session = DBSession()
        all_events = [r for r in db_session.query(Event).all()]
        self.assertEqual(0, len(all_events), "DB table should be empty")
        scoreboard = ScoreBoard()
        expected_log_message = 'example event sent'
        test_command = [CommsMessage(action="command", message="send_event"),
                        CommsMessage(action="command", message="terminate"),
                        ]
        log_msgs = self.build_governor(scoreboard,
                                       expected_log_message=expected_log_message,
                                       messages_to_send=test_command
                                       )
        self.assertIn(expected_log_message, "".join(log_msgs), "Couldn't find expected log message")
        all_events = [r for r in db_session.query(Event).all()]
        self.assertEqual(1, len(all_events), "One event should be in the DB")
        test_event = all_events[0]
        self.assertIsInstance(test_event.start, datetime)
        self.assertIsInstance(test_event.end, datetime)
        self.assertEqual(test_event.source, 'fake_actional_0')
        self.assertEqual(test_event.label, 'example event')
|
|
import os
import unittest
from pysd.tools.benchmarking import runner, assert_frames_close
rtol = .05
_root = os.path.dirname(__file__)
test_models = os.path.join(_root, "test-models/tests")
class TestIntegrationExamples(unittest.TestCase):
    """Run each example model and compare its output to the canonical data.

    Every test delegates to :meth:`_run_and_compare`; the skip decorators
    document models that are missing or not yet supported for xmile.
    """

    def _run_and_compare(self, model_rel_path):
        # Helper: run the model at test_models + model_rel_path and compare
        # its output frame against the canonical one (module-level rtol).
        output, canon = runner(test_models + model_rel_path)
        assert_frames_close(output, canon, rtol=rtol)

    def test_abs(self):
        self._run_and_compare('/abs/test_abs.xmile')

    @unittest.skip('error in model file')
    def test_active_initial(self):
        self._run_and_compare('/active_initial/test_active_initial.xmile')

    @unittest.skip('missing model file')
    def test_arguments(self):
        self._run_and_compare('/arguments/test_arguments.mdl')

    def test_builtin_max(self):
        self._run_and_compare('/builtin_max/builtin_max.xmile')

    def test_builtin_min(self):
        self._run_and_compare('/builtin_min/builtin_min.xmile')

    def test_chained_initialization(self):
        self._run_and_compare('/chained_initialization/test_chained_initialization.xmile')

    def test_comparisons(self):
        self._run_and_compare('/comparisons/comparisons.xmile')

    def test_constant_expressions(self):
        self._run_and_compare('/constant_expressions/test_constant_expressions.xmile')

    @unittest.skip('missing test model')
    def test_delay_parentheses(self):
        self._run_and_compare('/delay_parentheses/test_delay_parentheses.xmile')

    @unittest.skip('missing test model')
    def test_delays(self):
        self._run_and_compare('/delays/test_delays.mdl')

    @unittest.skip('missing test model')
    def test_euler_step_vs_saveper(self):
        self._run_and_compare('/euler_step_vs_saveper/test_euler_step_vs_saveper.xmile')

    def test_eval_order(self):
        self._run_and_compare('/eval_order/eval_order.xmile')

    def test_exp(self):
        self._run_and_compare('/exp/test_exp.xmile')

    def test_exponentiation(self):
        self._run_and_compare('/exponentiation/exponentiation.xmile')

    def test_function_capitalization(self):
        self._run_and_compare('/function_capitalization/test_function_capitalization.xmile')

    @unittest.skip('not sure if this is implemented in xmile?')
    def test_game(self):
        self._run_and_compare('/game/test_game.xmile')

    def test_if_stmt(self):
        self._run_and_compare('/if_stmt/if_stmt.xmile')

    def test_initial_function(self):
        self._run_and_compare('/initial_function/test_initial.xmile')

    @unittest.skip('no xmile model')
    def test_input_functions(self):
        self._run_and_compare('/input_functions/test_inputs.mdl')

    def test_limits(self):
        self._run_and_compare('/limits/test_limits.xmile')

    def test_line_breaks(self):
        self._run_and_compare('/line_breaks/test_line_breaks.xmile')

    def test_line_continuation(self):
        self._run_and_compare('/line_continuation/test_line_continuation.xmile')

    def test_ln(self):
        self._run_and_compare('/ln/test_ln.xmile')

    def test_log(self):
        self._run_and_compare('/log/test_log.xmile')

    def test_logicals(self):
        self._run_and_compare('/logicals/test_logicals.xmile')

    def test_lookups(self):
        self._run_and_compare('/lookups/test_lookups.xmile')

    def test_lookups_xscale(self):
        self._run_and_compare('/lookups/test_lookups_xscale.xmile')

    def test_lookups_xpts_sep(self):
        self._run_and_compare('/lookups/test_lookups_xpts_sep.xmile')

    def test_lookups_ypts_sep(self):
        self._run_and_compare('/lookups/test_lookups_ypts_sep.xmile')

    @unittest.skip('no xmile')
    def test_lookups_funcnames(self):
        self._run_and_compare('/lookups_funcnames/test_lookups_funcnames.mdl')

    def test_lookups_inline(self):
        self._run_and_compare('/lookups_inline/test_lookups_inline.xmile')

    @unittest.skip('no xmile')
    def test_lookups_inline_bounded(self):
        self._run_and_compare('/lookups_inline_bounded/test_lookups_inline_bounded.mdl')

    @unittest.skip('no xmile')
    def test_macro_cross_reference(self):
        self._run_and_compare('/macro_cross_reference/test_macro_cross_reference.mdl')

    @unittest.skip('missing test model')
    def test_macro_expression(self):
        self._run_and_compare('/macro_expression/test_macro_expression.xmile')

    @unittest.skip('missing test model')
    def test_macro_multi_expression(self):
        self._run_and_compare('/macro_multi_expression/test_macro_multi_expression.xmile')

    @unittest.skip('missing test model')
    def test_macro_multi_macros(self):
        self._run_and_compare('/macro_multi_macros/test_macro_multi_macros.xmile')

    @unittest.skip('no xmile')
    def test_macro_output(self):
        self._run_and_compare('/macro_output/test_macro_output.mdl')

    @unittest.skip('missing test model')
    def test_macro_stock(self):
        self._run_and_compare('/macro_stock/test_macro_stock.xmile')

    @unittest.skip('do we need this?')
    def test_macro_trailing_definition(self):
        self._run_and_compare('/macro_trailing_definition/test_macro_trailing_definition.mdl')

    def test_model_doc(self):
        self._run_and_compare('/model_doc/model_doc.xmile')

    def test_number_handling(self):
        self._run_and_compare('/number_handling/test_number_handling.xmile')

    def test_parentheses(self):
        self._run_and_compare('/parentheses/test_parens.xmile')

    def test_reference_capitalization(self):
        """A properly formatted Vensim model should never create this failure"""
        self._run_and_compare('/reference_capitalization/test_reference_capitalization.xmile')

    @unittest.skip('in branch')
    def test_rounding(self):
        self._run_and_compare('/rounding/test_rounding.mdl')

    @unittest.skip('no xmile')
    def test_smooth(self):
        self._run_and_compare('/smooth/test_smooth.mdl')

    @unittest.skip('missing test model')
    def test_smooth_and_stock(self):
        self._run_and_compare('/smooth_and_stock/test_smooth_and_stock.xmile')

    @unittest.skip('missing test model')
    def test_special_characters(self):
        self._run_and_compare('/special_characters/test_special_variable_names.xmile')

    def test_sqrt(self):
        self._run_and_compare('/sqrt/test_sqrt.xmile')

    @unittest.skip('missing test model')
    def test_subscript_multiples(self):
        self._run_and_compare('/subscript multiples/test_multiple_subscripts.xmile')

    @unittest.skip('missing test model')
    def test_subscript_1d_arrays(self):
        self._run_and_compare('/subscript_1d_arrays/test_subscript_1d_arrays.xmile')

    @unittest.skip('no xmile')
    def test_subscript_2d_arrays(self):
        self._run_and_compare('/subscript_2d_arrays/test_subscript_2d_arrays.xmile')

    @unittest.skip('no xmile')
    def test_subscript_3d_arrays(self):
        self._run_and_compare('/subscript_3d_arrays/test_subscript_3d_arrays.mdl')

    @unittest.skip('no xmile')
    def test_subscript_3d_arrays_lengthwise(self):
        self._run_and_compare('/subscript_3d_arrays_lengthwise/test_subscript_3d_arrays_lengthwise.mdl')

    @unittest.skip('no xmile')
    def test_subscript_3d_arrays_widthwise(self):
        self._run_and_compare('/subscript_3d_arrays_widthwise/test_subscript_3d_arrays_widthwise.mdl')

    @unittest.skip('in branch')
    def test_subscript_aggregation(self):
        self._run_and_compare('/subscript_aggregation/test_subscript_aggregation.mdl')

    @unittest.skip('missing test model')
    def test_subscript_constant_call(self):
        self._run_and_compare('/subscript_constant_call/test_subscript_constant_call.xmile')

    @unittest.skip('no xmile')
    def test_subscript_docs(self):
        self._run_and_compare('/subscript_docs/subscript_docs.mdl')

    @unittest.skip('no xmile')
    def test_subscript_individually_defined_1_of_2d_arrays(self):
        self._run_and_compare('/subscript_individually_defined_1_of_2d_arrays/subscript_individually_defined_1_of_2d_arrays.mdl')

    @unittest.skip('no xmile')
    def test_subscript_individually_defined_1_of_2d_arrays_from_floats(self):
        self._run_and_compare('/subscript_individually_defined_1_of_2d_arrays_from_floats/subscript_individually_defined_1_of_2d_arrays_from_floats.mdl')

    @unittest.skip('no xmile')
    def test_subscript_individually_defined_1d_arrays(self):
        self._run_and_compare('/subscript_individually_defined_1d_arrays/subscript_individually_defined_1d_arrays.mdl')

    @unittest.skip('no xmile')
    def test_subscript_individually_defined_stocks(self):
        self._run_and_compare('/subscript_individually_defined_stocks/test_subscript_individually_defined_stocks.mdl')

    @unittest.skip('no xmile')
    def test_subscript_mixed_assembly(self):
        self._run_and_compare('/subscript_mixed_assembly/test_subscript_mixed_assembly.mdl')

    @unittest.skip('no xmile')
    def test_subscript_selection(self):
        self._run_and_compare('/subscript_selection/subscript_selection.mdl')

    @unittest.skip('missing test model')
    def test_subscript_subranges(self):
        self._run_and_compare('/subscript_subranges/test_subscript_subrange.xmile')

    @unittest.skip('missing test model')
    def test_subscript_subranges_equal(self):
        self._run_and_compare('/subscript_subranges_equal/test_subscript_subrange_equal.xmile')

    @unittest.skip('no xmile')
    def test_subscript_switching(self):
        self._run_and_compare('/subscript_switching/subscript_switching.mdl')

    @unittest.skip('missing test model')
    def test_subscript_updimensioning(self):
        self._run_and_compare('/subscript_updimensioning/test_subscript_updimensioning.xmile')

    @unittest.skip('no xmile')
    def test_subscripted_delays(self):
        self._run_and_compare('/subscripted_delays/test_subscripted_delays.mdl')

    @unittest.skip('no xmile')
    def test_subscripted_flows(self):
        self._run_and_compare('/subscripted_flows/test_subscripted_flows.mdl')

    @unittest.skip('no xmile')
    def test_time(self):
        self._run_and_compare('/time/test_time.mdl')

    def test_trig(self):
        self._run_and_compare('/trig/test_trig.xmile')

    @unittest.skip('no xmile')
    def test_trend(self):
        self._run_and_compare('/trend/test_trend.xmile')

    @unittest.skip('no xmile')
    def test_xidz_zidz(self):
        self._run_and_compare('/xidz_zidz/xidz_zidz.xmile')
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import exception
from nova.network import model
from nova import test
from nova.tests.unit import fake_network_cache_model
from nova.virt import netutils
class RouteTests(test.NoDBTestCase):
    """Unit tests for the network cache Route model."""

    def test_create_route_with_attrs(self):
        """A fabricated route exposes the expected cidr/gateway/interface."""
        route = fake_network_cache_model.new_route()
        fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
        self.assertEqual(route['cidr'], '0.0.0.0/24')
        self.assertEqual(route['gateway']['address'], '192.168.1.1')
        self.assertEqual(route['interface'], 'eth0')

    def test_routes_equal(self):
        """Two default-constructed routes compare equal."""
        left = model.Route()
        right = model.Route()
        self.assertEqual(left, right)

    def test_routes_not_equal(self):
        """A difference in cidr, gateway or interface breaks equality."""
        left = model.Route(cidr='1.1.1.0/24')
        right = model.Route(cidr='2.2.2.0/24')
        self.assertNotEqual(left, right)

        left = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.1')
        right = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.2')
        self.assertNotEqual(left, right)

        left = model.Route(cidr='1.1.1.1/24', interface='tap0')
        right = model.Route(cidr='1.1.1.1/24', interface='tap1')
        self.assertNotEqual(left, right)

    def test_hydrate(self):
        """Hydrating from a partial dict leaves the missing fields as None."""
        route = model.Route.hydrate(
            {'gateway': fake_network_cache_model.new_ip(
                dict(address='192.168.1.1'))})
        self.assertIsNone(route['cidr'])
        self.assertEqual(route['gateway']['address'], '192.168.1.1')
        self.assertIsNone(route['interface'])
class IPTests(test.NoDBTestCase):
    """Unit tests for the network cache IP model."""

    def test_ip_equal(self):
        """IPs with the same address compare equal."""
        left = model.IP(address='127.0.0.1')
        right = model.IP(address='127.0.0.1')
        self.assertEqual(left, right)

    def test_ip_not_equal(self):
        """A difference in address, type or version breaks equality."""
        left = model.IP(address='127.0.0.1')
        right = model.IP(address='172.0.0.3')
        self.assertNotEqual(left, right)

        left = model.IP(address='127.0.0.1', type=1)
        right = model.IP(address='172.0.0.1', type=2)
        self.assertNotEqual(left, right)

        left = model.IP(address='127.0.0.1', version=4)
        right = model.IP(address='172.0.0.1', version=6)
        self.assertNotEqual(left, right)
class FixedIPTests(test.NoDBTestCase):
    """Unit tests for the network cache FixedIP model."""

    def test_createnew_fixed_ip_with_attrs(self):
        """A v4 fixed IP exposes its address, empty floats and version 4."""
        fixed_ip = model.FixedIP(address='192.168.1.100')
        self.assertEqual(fixed_ip['address'], '192.168.1.100')
        self.assertEqual(fixed_ip['floating_ips'], [])
        self.assertEqual(fixed_ip['type'], 'fixed')
        self.assertEqual(fixed_ip['version'], 4)

    def test_create_fixed_ipv6(self):
        """A v6 address is detected and reported as version 6."""
        fixed_ip = model.FixedIP(address='::1')
        self.assertEqual(fixed_ip['address'], '::1')
        self.assertEqual(fixed_ip['floating_ips'], [])
        self.assertEqual(fixed_ip['type'], 'fixed')
        self.assertEqual(fixed_ip['version'], 6)

    def test_create_fixed_bad_ip_fails(self):
        """A malformed address raises InvalidIpAddressError."""
        self.assertRaises(exception.InvalidIpAddressError,
                          model.FixedIP,
                          address='picklespicklespickles')

    def test_equate_two_fixed_ips(self):
        """Fixed IPs with identical attributes compare equal."""
        fixed_ip = model.FixedIP(address='::1')
        fixed_ip2 = model.FixedIP(address='::1')
        self.assertEqual(fixed_ip, fixed_ip2)

    def test_equate_two_dissimilar_fixed_ips_fails(self):
        """A difference in address/type/version/floats breaks equality."""
        fixed_ip = model.FixedIP(address='::1')
        fixed_ip2 = model.FixedIP(address='::2')
        self.assertNotEqual(fixed_ip, fixed_ip2)
        fixed_ip = model.FixedIP(address='::1', type='1')
        fixed_ip2 = model.FixedIP(address='::1', type='2')
        self.assertNotEqual(fixed_ip, fixed_ip2)
        fixed_ip = model.FixedIP(address='::1', version='6')
        fixed_ip2 = model.FixedIP(address='::1', version='4')
        self.assertNotEqual(fixed_ip, fixed_ip2)
        fixed_ip = model.FixedIP(address='::1', floating_ips='1.1.1.1')
        fixed_ip2 = model.FixedIP(address='::1', floating_ips='8.8.8.8')
        self.assertNotEqual(fixed_ip, fixed_ip2)

    def test_hydrate(self):
        """Hydrating an empty dict yields the model's defaults."""
        fixed_ip = model.FixedIP.hydrate({})
        self.assertEqual(fixed_ip['floating_ips'], [])
        self.assertIsNone(fixed_ip['address'])
        self.assertEqual(fixed_ip['type'], 'fixed')
        self.assertIsNone(fixed_ip['version'])

    def test_add_floating_ip(self):
        """add_floating_ip() records the floating address."""
        fixed_ip = model.FixedIP(address='192.168.1.100')
        fixed_ip.add_floating_ip('192.168.1.101')
        self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])

    def test_add_floating_ip_repeatedly_only_one_instance(self):
        """Re-adding the same floating IP keeps a single copy."""
        fixed_ip = model.FixedIP(address='192.168.1.100')
        # BUG FIX: was xrange(), which is Python 2 only and raises
        # NameError under Python 3; range() is equivalent here.
        for _ in range(10):
            fixed_ip.add_floating_ip('192.168.1.101')
        self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
class SubnetTests(test.NoDBTestCase):
    """Unit tests for the network cache Subnet model."""

    def test_create_subnet_with_attrs(self):
        """A fabricated subnet exposes cidr/dns/gateway/ips/routes/version."""
        subnet = fake_network_cache_model.new_subnet()
        route1 = fake_network_cache_model.new_route()
        self.assertEqual(subnet['cidr'], '10.10.0.0/24')
        self.assertEqual(subnet['dns'],
                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                 fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
        self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
        self.assertEqual(subnet['ips'],
                [fake_network_cache_model.new_fixed_ip(
                        dict(address='10.10.0.2')),
                 fake_network_cache_model.new_fixed_ip(
                        dict(address='10.10.0.3'))])
        self.assertEqual(subnet['routes'], [route1])
        self.assertEqual(subnet['version'], 4)

    def test_subnet_equal(self):
        """Two identically fabricated subnets compare equal."""
        subnet1 = fake_network_cache_model.new_subnet()
        subnet2 = fake_network_cache_model.new_subnet()
        self.assertEqual(subnet1, subnet2)

    def test_subnet_not_equal(self):
        """A difference in any attribute breaks subnet equality."""
        subnet1 = model.Subnet(cidr='1.1.1.0/24')
        subnet2 = model.Subnet(cidr='2.2.2.0/24')
        self.assertNotEqual(subnet1, subnet2)
        subnet1 = model.Subnet(dns='1.1.1.0/24')
        subnet2 = model.Subnet(dns='2.2.2.0/24')
        self.assertNotEqual(subnet1, subnet2)
        subnet1 = model.Subnet(gateway='1.1.1.1/24')
        subnet2 = model.Subnet(gateway='2.2.2.1/24')
        self.assertNotEqual(subnet1, subnet2)
        subnet1 = model.Subnet(ips='1.1.1.0/24')
        subnet2 = model.Subnet(ips='2.2.2.0/24')
        self.assertNotEqual(subnet1, subnet2)
        subnet1 = model.Subnet(routes='1.1.1.0/24')
        subnet2 = model.Subnet(routes='2.2.2.0/24')
        self.assertNotEqual(subnet1, subnet2)
        subnet1 = model.Subnet(version='4')
        subnet2 = model.Subnet(version='6')
        self.assertNotEqual(subnet1, subnet2)

    def test_add_route(self):
        """add_route() appends a new route to the list."""
        subnet = fake_network_cache_model.new_subnet()
        route1 = fake_network_cache_model.new_route()
        route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
        subnet.add_route(route2)
        self.assertEqual(subnet['routes'], [route1, route2])

    def test_add_route_a_lot(self):
        """Re-adding the same route keeps a single copy."""
        subnet = fake_network_cache_model.new_subnet()
        route1 = fake_network_cache_model.new_route()
        route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
        # BUG FIX: was xrange(), which is Python 2 only and raises
        # NameError under Python 3; range() is equivalent here.
        for _ in range(10):
            subnet.add_route(route2)
        self.assertEqual(subnet['routes'], [route1, route2])

    def test_add_dns(self):
        """add_dns() appends a new DNS server to the list."""
        subnet = fake_network_cache_model.new_subnet()
        dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9'))
        subnet.add_dns(dns)
        self.assertEqual(subnet['dns'],
                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                 fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
                 fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])

    def test_add_dns_a_lot(self):
        """Re-adding the same DNS server keeps a single copy."""
        subnet = fake_network_cache_model.new_subnet()
        # BUG FIX: xrange() -> range() for Python 3 compatibility.
        for _ in range(10):
            subnet.add_dns(fake_network_cache_model.new_ip(
                    dict(address='9.9.9.9')))
        self.assertEqual(subnet['dns'],
                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                 fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
                 fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])

    def test_add_ip(self):
        """add_ip() appends a new IP to the list."""
        subnet = fake_network_cache_model.new_subnet()
        subnet.add_ip(fake_network_cache_model.new_ip(
                dict(address='192.168.1.102')))
        self.assertEqual(subnet['ips'],
                [fake_network_cache_model.new_fixed_ip(
                        dict(address='10.10.0.2')),
                 fake_network_cache_model.new_fixed_ip(
                        dict(address='10.10.0.3')),
                 fake_network_cache_model.new_ip(
                        dict(address='192.168.1.102'))])

    def test_add_ip_a_lot(self):
        """Re-adding the same fixed IP keeps a single copy."""
        subnet = fake_network_cache_model.new_subnet()
        # BUG FIX: xrange() -> range() for Python 3 compatibility.
        for _ in range(10):
            subnet.add_ip(fake_network_cache_model.new_fixed_ip(
                    dict(address='192.168.1.102')))
        self.assertEqual(subnet['ips'],
                [fake_network_cache_model.new_fixed_ip(
                        dict(address='10.10.0.2')),
                 fake_network_cache_model.new_fixed_ip(
                        dict(address='10.10.0.3')),
                 fake_network_cache_model.new_fixed_ip(
                        dict(address='192.168.1.102'))])

    def test_hydrate(self):
        """hydrate() reconstructs a Subnet from its dict form."""
        subnet_dict = {
            'cidr': '255.255.255.0',
            'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))],
            'ips': [fake_network_cache_model.new_fixed_ip(
                    dict(address='2.2.2.2'))],
            'routes': [fake_network_cache_model.new_route()],
            'version': 4,
            'gateway': fake_network_cache_model.new_ip(
                    dict(address='3.3.3.3'))}
        subnet = model.Subnet.hydrate(subnet_dict)
        self.assertEqual(subnet['cidr'], '255.255.255.0')
        self.assertEqual(subnet['dns'], [fake_network_cache_model.new_ip(
                dict(address='1.1.1.1'))])
        self.assertEqual(subnet['gateway']['address'], '3.3.3.3')
        self.assertEqual(subnet['ips'], [fake_network_cache_model.new_fixed_ip(
                dict(address='2.2.2.2'))])
        self.assertEqual(subnet['routes'], [
                fake_network_cache_model.new_route()])
        self.assertEqual(subnet['version'], 4)
class NetworkTests(test.NoDBTestCase):
    """Unit tests for the network cache Network model."""

    def test_create_network(self):
        """A fabricated network exposes id, bridge, label and subnets."""
        network = fake_network_cache_model.new_network()
        self.assertEqual(network['id'], 1)
        self.assertEqual(network['bridge'], 'br0')
        self.assertEqual(network['label'], 'public')
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255'))])

    def test_add_subnet(self):
        """add_subnet() appends a new subnet to the list."""
        network = fake_network_cache_model.new_network()
        network.add_subnet(fake_network_cache_model.new_subnet(
                dict(cidr='0.0.0.0')))
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255')),
                 fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])

    def test_add_subnet_a_lot(self):
        """Re-adding the same subnet keeps a single copy."""
        network = fake_network_cache_model.new_network()
        # BUG FIX: was xrange(), which is Python 2 only and raises
        # NameError under Python 3; range() is equivalent here.
        for _ in range(10):
            network.add_subnet(fake_network_cache_model.new_subnet(
                    dict(cidr='0.0.0.0')))
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255')),
                 fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])

    def test_network_equal(self):
        """Two default-constructed networks compare equal."""
        network1 = model.Network()
        network2 = model.Network()
        self.assertEqual(network1, network2)

    def test_network_not_equal(self):
        """A difference in id/bridge/label/subnets breaks equality."""
        network1 = model.Network(id='1')
        network2 = model.Network(id='2')
        self.assertNotEqual(network1, network2)
        network1 = model.Network(bridge='br-int')
        network2 = model.Network(bridge='br0')
        self.assertNotEqual(network1, network2)
        network1 = model.Network(label='net1')
        network2 = model.Network(label='net2')
        self.assertNotEqual(network1, network2)
        network1 = model.Network(subnets='1.1.1.0/24')
        network2 = model.Network(subnets='2.2.2.0/24')
        self.assertNotEqual(network1, network2)

    def test_hydrate(self):
        """hydrate() reconstructs a Network from its dict form."""
        fake_network_cache_model.new_subnet()
        fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255'))
        network = model.Network.hydrate(fake_network_cache_model.new_network())
        self.assertEqual(network['id'], 1)
        self.assertEqual(network['bridge'], 'br0')
        self.assertEqual(network['label'], 'public')
        self.assertEqual(network['subnets'],
                [fake_network_cache_model.new_subnet(),
                 fake_network_cache_model.new_subnet(
                        dict(cidr='255.255.255.255'))])
class VIFTests(test.NoDBTestCase):
    """Unit tests for the network cache VIF model."""

    def test_create_vif(self):
        """A fabricated VIF exposes its id, MAC address and network."""
        vif = fake_network_cache_model.new_vif()
        self.assertEqual(vif['id'], 1)
        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
        self.assertEqual(vif['network'],
                fake_network_cache_model.new_network())

    def test_vif_equal(self):
        """Two default-constructed VIFs compare equal."""
        left = model.VIF()
        right = model.VIF()
        self.assertEqual(left, right)

    def test_vif_not_equal(self):
        """Any differing attribute breaks VIF equality."""
        left = model.VIF(id=1)
        right = model.VIF(id=2)
        self.assertNotEqual(left, right)

        left = model.VIF(address='00:00:00:00:00:11')
        right = model.VIF(address='00:00:00:00:00:22')
        self.assertNotEqual(left, right)

        left = model.VIF(network='net1')
        right = model.VIF(network='net2')
        self.assertNotEqual(left, right)

        left = model.VIF(type='ovs')
        right = model.VIF(type='linuxbridge')
        self.assertNotEqual(left, right)

        left = model.VIF(devname='ovs1234')
        right = model.VIF(devname='linuxbridge1234')
        self.assertNotEqual(left, right)

        left = model.VIF(qbh_params=1)
        right = model.VIF(qbh_params=None)
        self.assertNotEqual(left, right)

        left = model.VIF(qbg_params=1)
        right = model.VIF(qbg_params=None)
        self.assertNotEqual(left, right)

        left = model.VIF(active=True)
        right = model.VIF(active=False)
        self.assertNotEqual(left, right)

        left = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL)
        right = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT)
        self.assertNotEqual(left, right)

        left = model.VIF(profile={'pci_slot': '0000:0a:00.1'})
        right = model.VIF(profile={'pci_slot': '0000:0a:00.2'})
        self.assertNotEqual(left, right)

        left = model.VIF(preserve_on_delete=True)
        right = model.VIF(preserve_on_delete=False)
        self.assertNotEqual(left, right)

    def test_create_vif_with_type(self):
        """An explicit type attribute is carried through construction."""
        spec = dict(
            id=1,
            address='aa:aa:aa:aa:aa:aa',
            network=fake_network_cache_model.new_network(),
            type='bridge')
        vif = fake_network_cache_model.new_vif(spec)
        self.assertEqual(vif['id'], 1)
        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
        self.assertEqual(vif['type'], 'bridge')
        self.assertEqual(vif['network'],
                fake_network_cache_model.new_network())

    def test_vif_get_fixed_ips(self):
        """fixed_ips() flattens the fixed IPs of every subnet."""
        vif = fake_network_cache_model.new_vif()
        expected = [
            fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.2')),
            fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.3'))
        ] * 2
        self.assertEqual(vif.fixed_ips(), expected)

    def test_vif_get_floating_ips(self):
        """floating_ips() surfaces floats attached to the fixed IPs."""
        vif = fake_network_cache_model.new_vif()
        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
        self.assertEqual(vif.floating_ips(), ['192.168.1.1'])

    def test_vif_get_labeled_ips(self):
        """labeled_ips() groups the IPs under the network id and label."""
        vif = fake_network_cache_model.new_vif()
        expected = {
            'network_id': 1,
            'ips': [fake_network_cache_model.new_ip(
                        {'address': '10.10.0.2', 'type': 'fixed'}),
                    fake_network_cache_model.new_ip(
                        {'address': '10.10.0.3', 'type': 'fixed'})] * 2,
            'network_label': 'public'}
        self.assertEqual(vif.labeled_ips(), expected)

    def test_hydrate(self):
        """hydrate() reconstructs a VIF from its dict form."""
        fake_network_cache_model.new_network()
        vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
        self.assertEqual(vif['id'], 1)
        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
        self.assertEqual(vif['network'],
                fake_network_cache_model.new_network())

    def test_hydrate_vif_with_type(self):
        """hydrate() preserves an explicit VIF type."""
        spec = dict(
            id=1,
            address='aa:aa:aa:aa:aa:aa',
            network=fake_network_cache_model.new_network(),
            type='bridge')
        vif = model.VIF.hydrate(fake_network_cache_model.new_vif(spec))
        self.assertEqual(vif['id'], 1)
        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
        self.assertEqual(vif['type'], 'bridge')
        self.assertEqual(vif['network'],
                fake_network_cache_model.new_network())
class NetworkInfoTests(test.NoDBTestCase):
    """Tests for NetworkInfo, its async wrapper, and template injection.

    NOTE(review): the source paste was whitespace-flattened; the expected
    template strings below are reproduced exactly as given (option lines at
    column 0) — confirm their internal indentation against the real
    interfaces template before relying on them.
    """

    def test_create_model(self):
        """fixed_ips() aggregates the fixed IPs of every VIF (see * 4)."""
        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
                fake_network_cache_model.new_vif(
                        {'address': 'bb:bb:bb:bb:bb:bb'})])
        self.assertEqual(ninfo.fixed_ips(),
                [fake_network_cache_model.new_fixed_ip(
                        {'address': '10.10.0.2'}),
                 fake_network_cache_model.new_fixed_ip(
                        {'address': '10.10.0.3'})] * 4)

    def test_create_async_model(self):
        """NetworkInfoAsyncWrapper builds the model through the callable."""
        def async_wrapper():
            return model.NetworkInfo(
                    [fake_network_cache_model.new_vif(),
                     fake_network_cache_model.new_vif(
                            {'address': 'bb:bb:bb:bb:bb:bb'})])

        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertEqual(ninfo.fixed_ips(),
                [fake_network_cache_model.new_fixed_ip(
                        {'address': '10.10.0.2'}),
                 fake_network_cache_model.new_fixed_ip(
                        {'address': '10.10.0.3'})] * 4)

    def test_create_async_model_exceptions(self):
        """An exception from the async callable surfaces only once."""
        def async_wrapper():
            raise test.TestingException()

        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertRaises(test.TestingException, ninfo.wait)
        # 2nd one doesn't raise
        self.assertIsNone(ninfo.wait())
        # Test that do_raise=False works on .wait()
        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertIsNone(ninfo.wait(do_raise=False))
        # Test we also raise calling a method
        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertRaises(test.TestingException, ninfo.fixed_ips)

    def test_get_floating_ips(self):
        """floating_ips() collects floats attached to any VIF's fixed IPs."""
        vif = fake_network_cache_model.new_vif()
        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
        ninfo = model.NetworkInfo([vif,
                fake_network_cache_model.new_vif(
                        {'address': 'bb:bb:bb:bb:bb:bb'})])
        self.assertEqual(ninfo.floating_ips(), ['192.168.1.1'])

    def test_hydrate(self):
        """hydrate() round-trips a NetworkInfo without losing fixed IPs."""
        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
                fake_network_cache_model.new_vif(
                        {'address': 'bb:bb:bb:bb:bb:bb'})])
        model.NetworkInfo.hydrate(ninfo)
        self.assertEqual(ninfo.fixed_ips(),
                [fake_network_cache_model.new_fixed_ip(
                        {'address': '10.10.0.2'}),
                 fake_network_cache_model.new_fixed_ip(
                        {'address': '10.10.0.3'})] * 4)

    def _setup_injected_network_scenario(self, should_inject=True,
                                         use_ipv4=True, use_ipv6=False,
                                         gateway=True, dns=True,
                                         two_interfaces=False,
                                         libvirt_virt_type=None):
        """Check that netutils properly decides whether to inject based on
        whether the supplied subnet is static or dynamic.

        Builds a network with the requested v4/v6 subnets, marks it as
        injected, wraps it in one or two VIFs and returns the rendered
        template from netutils.get_injected_network_template().
        """
        network = fake_network_cache_model.new_network({'subnets': []})
        subnet_dict = {}
        if not gateway:
            subnet_dict['gateway'] = None
        if not dns:
            subnet_dict['dns'] = None
        if not should_inject:
            # A dhcp_server marks the subnet as dynamic, so no injection.
            subnet_dict['dhcp_server'] = '10.10.0.1'
        if use_ipv4:
            network.add_subnet(
                fake_network_cache_model.new_subnet(subnet_dict))
        if should_inject and use_ipv6:
            gateway_ip = fake_network_cache_model.new_ip(dict(
                address='1234:567::1'))
            ip = fake_network_cache_model.new_ip(dict(
                address='1234:567::2'))
            ipv6_subnet_dict = dict(
                cidr='1234:567::/48',
                gateway=gateway_ip,
                dns=[fake_network_cache_model.new_ip(
                        dict(address='2001:4860:4860::8888')),
                     fake_network_cache_model.new_ip(
                        dict(address='2001:4860:4860::8844'))],
                ips=[ip])
            if not gateway:
                ipv6_subnet_dict['gateway'] = None
            network.add_subnet(fake_network_cache_model.new_subnet(
                ipv6_subnet_dict))

        # Behave as though CONF.flat_injected is True
        network['meta']['injected'] = True
        vif = fake_network_cache_model.new_vif({'network': network})
        vifs = [vif]
        if two_interfaces:
            vifs.append(vif)

        nwinfo = model.NetworkInfo(vifs)
        return netutils.get_injected_network_template(
            nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type)

    def test_injection_dynamic(self):
        """Dynamic (DHCP) subnets produce no injected template."""
        expected = None
        template = self._setup_injected_network_scenario(should_inject=False)
        self.assertEqual(expected, template)

    def test_injection_static(self):
        """A static v4 subnet renders the full interfaces file."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
"""
        template = self._setup_injected_network_scenario()
        self.assertEqual(expected, template)

    def test_injection_static_no_gateway(self):
        """Without a gateway the gateway line is omitted."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
"""
        template = self._setup_injected_network_scenario(gateway=False)
        self.assertEqual(expected, template)

    def test_injection_static_no_dns(self):
        """Without DNS servers the dns-nameservers line is omitted."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
"""
        template = self._setup_injected_network_scenario(dns=False)
        self.assertEqual(expected, template)

    def test_injection_static_overriden_template(self):
        """CONF.injected_network_template can swap in a custom template."""
        cfg.CONF.set_override(
            'injected_network_template',
            'nova/tests/unit/network/interfaces-override.template')
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip route add 0.0.0.0/24 via 192.168.1.1 dev eth0
pre-down ip route del 0.0.0.0/24 via 192.168.1.1 dev eth0
"""
        template = self._setup_injected_network_scenario()
        self.assertEqual(expected, template)

    def test_injection_static_ipv6(self):
        """A static v6 subnet adds an inet6 stanza to the template."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
        template = self._setup_injected_network_scenario(use_ipv6=True)
        self.assertEqual(expected, template)

    def test_injection_static_ipv6_no_gateway(self):
        """v6 without a gateway omits both gateway lines."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
        template = self._setup_injected_network_scenario(use_ipv6=True,
                                                         gateway=False)
        self.assertEqual(expected, template)

    def test_injection_static_with_ipv4_off(self):
        """Without a v4 subnet there is nothing to inject."""
        expected = None
        template = self._setup_injected_network_scenario(use_ipv4=False)
        self.assertEqual(expected, template)

    def test_injection_ipv6_two_interfaces(self):
        """Two VIFs render eth0 and eth1 stanzas, each with inet6."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth1 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
        template = self._setup_injected_network_scenario(use_ipv6=True,
                                                         two_interfaces=True)
        self.assertEqual(expected, template)

    def test_injection_ipv6_with_lxc(self):
        """For lxc, v6 config is emitted as post-up commands instead."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
"""
        template = self._setup_injected_network_scenario(
            use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc')
        self.assertEqual(expected, template)

    def test_injection_ipv6_with_lxc_no_gateway(self):
        """lxc v6 without a gateway omits the default-route post-up."""
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
"""
        template = self._setup_injected_network_scenario(
            use_ipv6=True, gateway=False, two_interfaces=True,
            libvirt_virt_type='lxc')
        self.assertEqual(expected, template)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training as train
class MemoryOptimizerSwapTest(test.TestCase):
  """Tests the Grappler memory optimizer's manual swap annotations."""

  def testNoSwapping(self):
    """Make sure the graph is preserved when there is nothing to swap."""
    a = variables.Variable(10, name='a')
    b = variables.Variable(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    ops.get_collection_ref(ops.GraphKeys.TRAIN_OP).append(d)
    metagraph = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    node_count = len(metagraph.graph_def.node)
    node_names = [node.name for node in metagraph.graph_def.node]

    config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    optimized = tf_optimizer.OptimizeGraph(config, metagraph)

    # No node was annotated, so the graph must come back unchanged.
    self.assertEqual(len(optimized.node), node_count)
    self.assertItemsEqual([node.name for node in optimized.node], node_names)

  def testSimpleSwap(self):
    """Check that the swap annotations are followed."""
    a = variables.Variable(10, name='a')
    b = variables.Variable(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    ops.get_collection_ref(ops.GraphKeys.TRAIN_OP).append(d)
    # Annotate input 0 of 'd' for swapping to host memory.
    d.op.node_def.attr['_swap_to_host'].i = 0

    metagraph = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    node_count = len(metagraph.graph_def.node)
    config = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)
    optimized = tf_optimizer.OptimizeGraph(config, metagraph)

    # The rewrite adds exactly one swap-out/swap-in pair.
    self.assertEqual(len(optimized.node), node_count + 2)
    names = set(node.name for node in optimized.node)
    self.assertTrue(names > set(['a', 'b', 'c', 'd',
                                 'swap_in_d_0', 'swap_out_d_0']))
    by_name = {node.name: node for node in optimized.node}
    swap_in = by_name['swap_in_d_0']
    self.assertEqual('swap_out_d_0', swap_in.input[0])
    self.assertEqual('^b/read', swap_in.input[1])
    self.assertEqual('b/read', by_name['swap_out_d_0'].input[0])
    self.assertEqual('swap_in_d_0', by_name['d'].input[0])
    self.assertEqual('c', by_name['d'].input[1])
class MemoryOptimizerRecomputeTest(test.TestCase):
  """Tests the Python interface to recomputation rewrites.

  See core/grappler/optimizers/memory_optimizer_test.cc for functional tests.
  """

  def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):
    """A simple layered graph with conv, an intermediate op, and a ReLU.

    Returns a tuple of (metagraph, init_op_name, train_op_name, loss_name).
    """
    graph = ops.Graph()
    with graph.as_default():
      random_seed.set_random_seed(1)
      current_activation = variable_scope.get_variable(
          name='start', shape=[batch_size, image_dim, image_dim, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      for layer_number in range(10):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
          # Intermediate op between conv and relu: a recompute candidate.
          current_activation = 2. * after_conv
          current_activation = nn.relu(current_activation)
      loss = math_ops.reduce_mean(current_activation)
      # Optionally place the optimizer under a name scope so the rewriter's
      # gradient-name-prefix matching can be exercised.
      with ops.name_scope(optimizer_scope_name):
        optimizer = train.AdamOptimizer(0.001)
        train_op = optimizer.minimize(loss)
      init_op = variables.global_variables_initializer()
      metagraph = train.export_meta_graph()
    return (metagraph, init_op.name, train_op.name, loss.name)

  def testRewritingDefaultGradientNames(self):
    """Tests that rewriting occurs with default gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph()
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS),
        original_metagraph)
    # The rewrite only ever adds nodes; the original graph has none of the
    # 'Recomputed/' nodes the rewrite introduces.
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20,  # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))

  def testRewritingNameScopedGradientNames(self):
    """Tests that rewriting occurs with non-standard gradient names."""
    (original_metagraph, _, _, _) = self._GetMetaGraph(
        optimizer_scope_name='optimizer')
    rewritten_graph_def = tf_optimizer.OptimizeGraph(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS,
            memory_optimizer_target_node_name_prefix='optimizer/gradients/'),
        original_metagraph)
    self.assertGreater(
        len(rewritten_graph_def.node),
        len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20,  # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))

  def _GetMemoryOptimizerSessionConfig(self):
    """Session config enabling the heuristic memory optimizer."""
    rewrite_options = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)
    return config_pb2.ConfigProto(graph_options=graph_options)

  def _RunMetaGraphWithConfig(
      self, config, metagraph, init_op_name, train_op_name, loss_op_name):
    """Import metagraph, init, run two train steps, return the loss value."""
    graph = ops.Graph()
    with graph.as_default():
      train.import_meta_graph(metagraph)
      init_op = graph.get_operation_by_name(init_op_name)
      train_op = graph.get_operation_by_name(train_op_name)
      loss_op = graph.get_tensor_by_name(loss_op_name)
      with session.Session(config=config, graph=graph) as sess:
        sess.run(init_op)
        sess.run(train_op)
        sess.run(train_op)
        return sess.run(loss_op)

  def testRecomputationRewritingNoErrors(self):
    """Tests that graph output is not significantly different with rewriting."""
    (original_metagraph, init_op_name, train_op_name, loss_op_name
    ) = self._GetMetaGraph()
    original_loss = self._RunMetaGraphWithConfig(
        config=config_pb2.ConfigProto(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    memory_optimized_loss = self._RunMetaGraphWithConfig(
        config=self._GetMemoryOptimizerSessionConfig(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    # Recomputation must not change the math, only the memory profile.
    self.assertAllClose(original_loss, memory_optimized_loss, rtol=1e-4)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  test.main()
|
|
import sys
import numpy as np
from numpy.testing import *
def assert_dtype_equal(a, b):
    """Assert that two dtypes compare equal and hash to the same value."""
    assert_equal(a, b)
    # Equal dtypes must be interchangeable as dict/set keys.
    assert_equal(hash(a), hash(b),
                 "two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
    """Assert that two dtypes differ and do not collide on hash."""
    assert_(a != b)
    # A hash collision here would break dtype-keyed lookups.
    assert_(hash(a) != hash(b),
            "two different types hash to the same value !")
class TestBuiltin(TestCase):
    """Hashing behaviour of the builtin scalar dtypes."""

    def test_run(self):
        """Only test hash runs at all."""
        # The ``np.int``/``np.float``/``np.complex``/``np.str``/``np.object``/
        # ``np.unicode`` aliases were plain re-exports of the builtins; they
        # were removed in numpy 1.24.  Using the builtins directly is
        # equivalent on all numpy versions (``np.str`` and ``np.unicode``
        # were both ``str``).
        for t in [int, float, complex, np.int32, str, object]:
            dt = np.dtype(t)
            hash(dt)

    def test_dtype(self):
        # Make sure equivalent byte order char hash the same (e.g. < and = on
        # little endian)
        for t in [int, float]:
            dt = np.dtype(t)
            dt2 = dt.newbyteorder("<")
            dt3 = dt.newbyteorder(">")
            if dt == dt2:
                self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test")
                assert_dtype_equal(dt, dt2)
            else:
                self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test")
                assert_dtype_equal(dt, dt3)

    def test_equivalent_dtype_hashing(self):
        # Make sure equivalent dtypes with different type num hash equal
        uintp = np.dtype(np.uintp)
        if uintp.itemsize == 4:
            left = uintp
            right = np.dtype(np.uint32)
        else:
            left = uintp
            right = np.dtype(np.ulonglong)
        self.assertTrue(left == right)
        self.assertTrue(hash(left) == hash(right))

    def test_invalid_types(self):
        # Make sure invalid type strings raise exceptions
        for typestr in ['O3', 'O5', 'O7', 'b3', 'h4', 'I5', 'l4', 'l8',
                        'L4', 'L8', 'q8', 'q16', 'Q8', 'Q16', 'e3',
                        'f5', 'd8', 't8', 'g12', 'g16',
                        'NA[u4,0xffffffff]']:
            assert_raises(TypeError, np.dtype, typestr)

    def test_bad_param(self):
        # Can't give a size that's too small
        assert_raises(ValueError, np.dtype,
                      {'names':['f0','f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0,4],
                       'itemsize':4})
        # If alignment is enabled, the alignment (4) must divide the itemsize
        assert_raises(ValueError, np.dtype,
                      {'names':['f0','f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0,4],
                       'itemsize':9}, align=True)
        # If alignment is enabled, the individual fields must be aligned
        assert_raises(ValueError, np.dtype,
                      {'names':['f0','f1'],
                       'formats':['i1','f4'],
                       'offsets':[0,2]}, align=True)
class TestRecord(TestCase):
    """Hashing, equality and construction of structured (record) dtypes."""

    def test_equivalent_record(self):
        """Test whether equivalent record dtypes hash the same."""
        # Builtin ``int`` replaces the ``np.int`` alias removed in numpy 1.24;
        # they were identical.
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        assert_dtype_equal(a, b)

    def test_different_names(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype([('yo', int)])
        b = np.dtype([('ye', int)])
        assert_dtype_not_equal(a, b)

    def test_different_titles(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype({'names': ['r','b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['Red pixel', 'Blue pixel']})
        b = np.dtype({'names': ['r','b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['RRed pixel', 'Blue pixel']})
        assert_dtype_not_equal(a, b)

    def test_not_lists(self):
        """Test if an appropriate exception is raised when passing bad values to
        the dtype constructor.
        """
        self.assertRaises(TypeError, np.dtype,
                          dict(names=set(['A', 'B']), formats=['f8', 'i4']))
        self.assertRaises(TypeError, np.dtype,
                          dict(names=['A', 'B'], formats=set(['f8', 'i4'])))

    def test_aligned_size(self):
        # Check that structured dtypes get padded to an aligned size
        dt = np.dtype('i4, i1', align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'names':['f0','f1'],
                       'formats':['i4', 'u1'],
                       'offsets':[0,4]}, align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
        assert_equal(dt.itemsize, 8)
        # Nesting should preserve that alignment
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=True)
        assert_equal(dt1.itemsize, 20)
        dt2 = np.dtype({'names':['f0','f1','f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 16]}, align=True)
        assert_equal(dt2.itemsize, 20)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 16)}, align=True)
        assert_equal(dt3.itemsize, 20)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Nesting should preserve packing
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=False)
        assert_equal(dt1.itemsize, 11)
        dt2 = np.dtype({'names':['f0','f1','f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 10]}, align=False)
        assert_equal(dt2.itemsize, 11)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 10)}, align=False)
        assert_equal(dt3.itemsize, 11)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)

    def test_union_struct(self):
        # Should be able to create union dtypes
        dt = np.dtype({'names':['f0','f1','f2'], 'formats':['<u4', '<u2','<u2'],
                       'offsets':[0,0,2]}, align=True)
        assert_equal(dt.itemsize, 4)
        a = np.array([3], dtype='<u4').view(dt)
        a['f1'] = 10
        a['f2'] = 36
        assert_equal(a['f0'], 10 + 36*256*256)
        # Should be able to specify fields out of order
        dt = np.dtype({'names':['f0','f1','f2'], 'formats':['<u4', '<u2','<u2'],
                       'offsets':[4,0,2]}, align=True)
        assert_equal(dt.itemsize, 8)
        dt2 = np.dtype({'names':['f2','f0','f1'],
                        'formats':['<u2', '<u4','<u2'],
                        'offsets':[2,4,0]}, align=True)
        vals = [(0,1,2), (3,-1,4)]
        vals2 = [(2,0,1), (4,3,-1)]
        a = np.array(vals, dt)
        b = np.array(vals2, dt2)
        assert_equal(a.astype(dt2), b)
        assert_equal(b.astype(dt), a)
        assert_equal(a.view(dt2), b)
        assert_equal(b.view(dt), a)
        # Should not be able to overlap objects with other types
        assert_raises(TypeError, np.dtype,
                      {'names':['f0','f1'],
                       'formats':['O', 'i1'],
                       'offsets':[0,2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0','f1'],
                       'formats':['i4', 'O'],
                       'offsets':[0,3]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0','f1'],
                       'formats':[[('a','O')], 'i1'],
                       'offsets':[0,2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0','f1'],
                       'formats':['i4', [('a','O')]],
                       'offsets':[0,3]})
        # Out of order should still be ok, however
        dt = np.dtype({'names':['f0','f1'],
                       'formats':['i1', 'O'],
                       'offsets':[np.dtype('intp').itemsize, 0]})

    def test_comma_datetime(self):
        dt = np.dtype('M8[D],datetime64[Y],i8')
        assert_equal(dt, np.dtype([('f0', 'M8[D]'),
                                   ('f1', 'datetime64[Y]'),
                                   ('f2', 'i8')]))
class TestSubarray(TestCase):
    """Hashing and equality of subarray dtypes."""

    def test_single_subarray(self):
        # Builtin ``int`` replaces the ``np.int`` alias removed in numpy 1.24.
        a = np.dtype((int, (2)))
        b = np.dtype((int, (2,)))
        assert_dtype_equal(a, b)
        # Scalar and 1-tuple shape specs normalize to a tuple shape.
        assert_equal(type(a.subdtype[1]), tuple)
        assert_equal(type(b.subdtype[1]), tuple)

    def test_equivalent_record(self):
        """Test whether equivalent subarray dtypes hash the same."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 3)))
        assert_dtype_equal(a, b)

    def test_nonequivalent_record(self):
        """Test whether different subarray dtypes hash differently."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (3, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((int, (1, 2, 3)))
        b = np.dtype((int, (1, 2)))
        assert_dtype_not_equal(a, b)

    def test_shape_equal(self):
        """Test some data types that are equal"""
        assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
        assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
        assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
        assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
        d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
        assert_dtype_equal(np.dtype(d), np.dtype(d))

    def test_shape_simple(self):
        """Test some simple cases that shouldn't be equal"""
        assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
        assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
        assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))

    def test_shape_monster(self):
        """Test some more complicated cases that shouldn't be equal"""
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
class TestMonsterType(TestCase):
    """Test deeply nested subtypes."""

    def test1(self):
        # Builtin ``int`` replaces the ``np.int`` alias removed in numpy 1.24.
        simple1 = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
                            'titles': ['Red pixel', 'Blue pixel']})
        a = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        b = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        assert_dtype_equal(a, b)
        # Nest the record dtype itself as a subarray element type.
        c = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        d = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        assert_dtype_equal(c, d)
class TestMetadata(TestCase):
    """Tests for the dtype ``metadata`` attribute."""

    def test_no_metadata(self):
        # A plain dtype carries no metadata at all.
        self.assertEqual(np.dtype(int).metadata, None)

    def test_metadata_takes_dict(self):
        dt = np.dtype(int, metadata={'datum': 1})
        self.assertEqual(dt.metadata, {'datum': 1})

    def test_metadata_rejects_nondict(self):
        # Anything other than a dict is rejected.
        for bad_metadata in ('datum', 1, None):
            self.assertRaises(TypeError, np.dtype, int, metadata=bad_metadata)

    def test_nested_metadata(self):
        outer = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
        self.assertEqual(outer['a'].metadata, {'datum': 1})
class TestString(TestCase):
    """Checks for the exact str()/repr() formatting of complex dtypes."""

    def test_complex_dtype_str(self):
        # Nested record dtype with subarrays; packed layout prints as a list.
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(str(dt),
                     "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])]")
        # If the sticky aligned flag is set to True, it makes the
        # str() function use a dict representation with an 'aligned' flag
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))],
                        (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])],
                      align=True)
        assert_equal(str(dt),
                     "{'names':['top','bottom'], "
                     "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)),"
                     "[('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))]], "
                     "'offsets':[0,76800], "
                     "'itemsize':80000, "
                     "'aligned':True}")
        # The printed form must round-trip through eval back to the same dtype.
        assert_equal(np.dtype(eval(str(dt))), dt)
        dt = np.dtype({'names': ['r','g','b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "[(('Red pixel', 'r'), 'u1'), "
                     "(('Green pixel', 'g'), 'u1'), "
                     "(('Blue pixel', 'b'), 'u1')]")
        # Overlapping fields (union-style) force the dict representation.
        dt = np.dtype({'names': ['rgba', 'r','g','b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "{'names':['rgba','r','g','b'],"
                     " 'formats':['<u4','u1','u1','u1'],"
                     " 'offsets':[0,0,1,2],"
                     " 'titles':['Color','Red pixel',"
                     "'Green pixel','Blue pixel'],"
                     " 'itemsize':4}")
        # Non-contiguous offsets also force the dict representation.
        dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "{'names':['r','b'],"
                     " 'formats':['u1','u1'],"
                     " 'offsets':[0,2],"
                     " 'titles':['Red pixel','Blue pixel'],"
                     " 'itemsize':3}")
        # timedelta64 / datetime64 fields print with their unit codes.
        dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
        assert_equal(str(dt),
                     "[('a', '<m8[D]'), ('b', '<M8[us]')]")

    def test_complex_dtype_repr(self):
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(repr(dt),
                     "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])])")
        # repr() of an aligned dtype appends the align=True flag.
        dt = np.dtype({'names': ['r','g','b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
                      align=True)
        assert_equal(repr(dt),
                     "dtype([(('Red pixel', 'r'), 'u1'), "
                     "(('Green pixel', 'g'), 'u1'), "
                     "(('Blue pixel', 'b'), 'u1')], align=True)")
        dt = np.dtype({'names': ['rgba', 'r','g','b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']}, align=True)
        assert_equal(repr(dt),
                     "dtype({'names':['rgba','r','g','b'],"
                     " 'formats':['<u4','u1','u1','u1'],"
                     " 'offsets':[0,0,1,2],"
                     " 'titles':['Color','Red pixel',"
                     "'Green pixel','Blue pixel'],"
                     " 'itemsize':4}, align=True)")
        # Explicit itemsize with trailing padding appears in the repr.
        dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel'],
                       'itemsize': 4})
        assert_equal(repr(dt),
                     "dtype({'names':['r','b'], "
                     "'formats':['u1','u1'], "
                     "'offsets':[0,2], "
                     "'titles':['Red pixel','Blue pixel'], "
                     "'itemsize':4})")
        dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
        assert_equal(repr(dt),
                     "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
class TestDtypeAttributeDeletion(object):
    """Check that dtype attributes can never be deleted."""

    def test_dtype_non_writable_attributes_deletion(self):
        dt = np.dtype(np.double)
        attr = ["subdtype", "descr", "str", "name", "base", "shape",
                "isbuiltin", "isnative", "isalignedstruct", "fields",
                "metadata", "hasobject"]
        # Python 2.4 raised TypeError when deleting a read-only attribute;
        # later versions raise AttributeError.  Compare version_info instead
        # of slicing sys.version - 'sys.version[:3]' is a fragile string
        # comparison (it would also match e.g. a hypothetical '2.40').
        if sys.version_info[:2] == (2, 4):
            error = TypeError
        else:
            error = AttributeError
        for s in attr:
            assert_raises(error, delattr, dt, s)

    def test_dtype_writable_attributes_deletion(self):
        # 'names' is writable on structured dtypes, but still not deletable.
        dt = np.dtype(np.double)
        attr = ["names"]
        for s in attr:
            assert_raises(AttributeError, delattr, dt, s)
# Run the full module test suite when executed directly.
if __name__ == "__main__":
    run_module_suite()
|
|
"""Voluptuous schemas for the KNX integration."""
from __future__ import annotations
from abc import ABC
from collections import OrderedDict
from typing import Any, ClassVar, Final
import voluptuous as vol
from xknx import XKNX
from xknx.devices.climate import SetpointShiftMode
from xknx.dpt import DPTBase, DPTNumeric
from xknx.exceptions import CouldNotParseAddress
from xknx.io import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
from xknx.telegram.address import IndividualAddress, parse_device_group_address
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_DEVICE_CLASSES,
)
from homeassistant.components.climate.const import HVAC_MODE_HEAT, HVAC_MODES
from homeassistant.components.cover import DEVICE_CLASSES as COVER_DEVICE_CLASSES
from homeassistant.components.number.const import MODE_AUTO, MODE_BOX, MODE_SLIDER
from homeassistant.components.sensor import CONF_STATE_CLASS, STATE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_CATEGORY,
CONF_ENTITY_ID,
CONF_HOST,
CONF_MODE,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ENTITY_CATEGORIES_SCHEMA
from .const import (
CONF_INVERT,
CONF_KNX_EXPOSE,
CONF_KNX_INDIVIDUAL_ADDRESS,
CONF_KNX_ROUTING,
CONF_KNX_TUNNELING,
CONF_RESET_AFTER,
CONF_RESPOND_TO_READ,
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
CONTROLLER_MODES,
KNX_ADDRESS,
PRESET_MODES,
ColorTempModes,
SupportedPlatforms,
)
##################
# KNX VALIDATORS
##################
def ga_validator(value: Any) -> str | int:
    """Validate that value is parsable as GroupAddress or InternalGroupAddress."""
    if isinstance(value, (str, int)):
        try:
            parse_device_group_address(value)
        except CouldNotParseAddress:
            pass
        else:
            # Parsing succeeded - hand the raw value through unchanged.
            return value
    raise vol.Invalid(
        f"value '{value}' is not a valid KNX group address '<main>/<middle>/<sub>', '<main>/<sub>' "
        "or '<free>' (eg.'1/2/3', '9/234', '123'), nor xknx internal address 'i-<string>'."
    )
# Accept a single group address or a list of them; output is always a list.
ga_list_validator = vol.All(cv.ensure_list, [ga_validator])

# KNX individual address: either the dotted string form matched by xknx's
# IndividualAddress regex, or the raw integer representation (1..65535).
ia_validator = vol.Any(
    cv.matches_regex(IndividualAddress.ADDRESS_RE.pattern),
    vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
    msg="value does not match pattern for KNX individual address '<area>.<line>.<device>' (eg.'1.1.100')",
)
def number_limit_sub_validator(entity_config: OrderedDict) -> OrderedDict:
    """Validate a number entity configuration dependent on the configured value type.

    Raises vol.Invalid if the configured min/max/step fall outside the range
    supported by the DPTNumeric transcoder for the configured `type`.
    """
    value_type = entity_config[CONF_TYPE]
    min_config: float | None = entity_config.get(NumberSchema.CONF_MIN)
    max_config: float | None = entity_config.get(NumberSchema.CONF_MAX)
    step_config: float | None = entity_config.get(NumberSchema.CONF_STEP)
    dpt_class = DPTNumeric.parse_transcoder(value_type)

    if dpt_class is None:
        raise vol.Invalid(f"'type: {value_type}' is not a valid numeric sensor type.")
    # Infinity is not supported by Home Assistant frontend so user defined
    # config is required if the xknx DPTNumeric subclass defines it as limit.
    if min_config is None and dpt_class.value_min == float("-inf"):
        raise vol.Invalid(f"'min' key required for value type '{value_type}'")
    if min_config is not None and min_config < dpt_class.value_min:
        raise vol.Invalid(
            f"'min: {min_config}' undercuts possible minimum"
            f" of value type '{value_type}': {dpt_class.value_min}"
        )

    if max_config is None and dpt_class.value_max == float("inf"):
        raise vol.Invalid(f"'max' key required for value type '{value_type}'")
    if max_config is not None and max_config > dpt_class.value_max:
        raise vol.Invalid(
            f"'max: {max_config}' exceeds possible maximum"
            f" of value type '{value_type}': {dpt_class.value_max}"
        )

    # A step finer than the DPT resolution could never be transmitted.
    if step_config is not None and step_config < dpt_class.resolution:
        raise vol.Invalid(
            f"'step: {step_config}' undercuts possible minimum step"
            f" of value type '{value_type}': {dpt_class.resolution}"
        )

    return entity_config
def numeric_type_validator(value: Any) -> str | int:
    """Validate that value is parsable as numeric sensor type."""
    if isinstance(value, (str, int)):
        # Only values a DPTNumeric transcoder understands are accepted.
        if DPTNumeric.parse_transcoder(value) is not None:
            return value
    raise vol.Invalid(f"value '{value}' is not a valid numeric sensor type.")
def select_options_sub_validator(entity_config: OrderedDict) -> OrderedDict:
    """Validate a select entity options configuration."""
    seen_options: set = set()
    seen_payloads: set = set()
    payload_length = entity_config[SelectSchema.CONF_PAYLOAD_LENGTH]
    # payload_length 0 caps the payload at 0x3F; otherwise the cap is the
    # largest unsigned integer fitting in `payload_length` bytes.
    max_payload = 0x3F if payload_length == 0 else 256 ** payload_length - 1

    for opt in entity_config[SelectSchema.CONF_OPTIONS]:
        option = opt[SelectSchema.CONF_OPTION]
        payload = opt[SelectSchema.CONF_PAYLOAD]
        if payload > max_payload:
            raise vol.Invalid(
                f"'payload: {payload}' for 'option: {option}' exceeds possible"
                f" maximum of 'payload_length: {payload_length}': {max_payload}"
            )
        # Both option names and payload values must be unique.
        if option in seen_options:
            raise vol.Invalid(f"duplicate item for 'option' not allowed: {option}")
        seen_options.add(option)
        if payload in seen_payloads:
            raise vol.Invalid(f"duplicate item for 'payload' not allowed: {payload}")
        seen_payloads.add(payload)
    return entity_config
def sensor_type_validator(value: Any) -> str | int:
    """Validate that value is parsable as sensor type."""
    is_candidate = isinstance(value, (str, int))
    # Any DPT transcoder (not just numeric ones) makes the value valid here.
    if is_candidate and DPTBase.parse_transcoder(value) is not None:
        return value
    raise vol.Invalid(f"value '{value}' is not a valid sensor type.")
# State synchronisation setting: an interval in minutes (2..1440), a plain
# boolean, or a string expression like 'init', 'expire 60' or 'every 30'.
sync_state_validator = vol.Any(
    vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
    cv.boolean,
    cv.matches_regex(r"^(init|expire|every)( \d*)?$"),
)
##############
# CONNECTION
##############
class ConnectionSchema:
    """Voluptuous schema for KNX connection."""

    # Config keys for the connection section.
    CONF_KNX_LOCAL_IP = "local_ip"
    CONF_KNX_MCAST_GRP = "multicast_group"
    CONF_KNX_MCAST_PORT = "multicast_port"
    CONF_KNX_RATE_LIMIT = "rate_limit"
    CONF_KNX_ROUTE_BACK = "route_back"
    CONF_KNX_STATE_UPDATER = "state_updater"

    # Tunneling connection options; only `host` is required.
    TUNNELING_SCHEMA = vol.Schema(
        {
            vol.Optional(CONF_PORT, default=DEFAULT_MCAST_PORT): cv.port,
            vol.Required(CONF_HOST): cv.string,
            vol.Optional(CONF_KNX_LOCAL_IP): cv.string,
            vol.Optional(CONF_KNX_ROUTE_BACK, default=False): cv.boolean,
        }
    )

    # Routing accepts `None` or an optional local IP binding.
    ROUTING_SCHEMA = vol.Maybe(vol.Schema({vol.Optional(CONF_KNX_LOCAL_IP): cv.string}))

    # `routing` and `tunneling` are mutually exclusive connection types.
    SCHEMA = {
        vol.Exclusive(CONF_KNX_ROUTING, "connection_type"): ROUTING_SCHEMA,
        vol.Exclusive(CONF_KNX_TUNNELING, "connection_type"): TUNNELING_SCHEMA,
        vol.Optional(
            CONF_KNX_INDIVIDUAL_ADDRESS, default=XKNX.DEFAULT_ADDRESS
        ): ia_validator,
        vol.Optional(CONF_KNX_MCAST_GRP, default=DEFAULT_MCAST_GRP): cv.string,
        vol.Optional(CONF_KNX_MCAST_PORT, default=DEFAULT_MCAST_PORT): cv.port,
        vol.Optional(CONF_KNX_STATE_UPDATER, default=True): cv.boolean,
        # Telegrams per second; accepts 1..100.
        vol.Optional(CONF_KNX_RATE_LIMIT, default=20): vol.All(
            vol.Coerce(int), vol.Range(min=1, max=100)
        ),
    }
#############
# PLATFORMS
#############
class KNXPlatformSchema(ABC):
    """Voluptuous schema for KNX platform entity configuration."""

    # Subclasses provide the platform key and the per-entity schema.
    PLATFORM_NAME: ClassVar[str]
    ENTITY_SCHEMA: ClassVar[vol.Schema]

    @classmethod
    def platform_node(cls) -> dict[vol.Optional, vol.All]:
        """Return a schema node for the platform."""
        entity_list_validator = vol.All(cv.ensure_list, [cls.ENTITY_SCHEMA])
        return {vol.Optional(cls.PLATFORM_NAME): entity_list_validator}
class BinarySensorSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX binary sensors."""

    PLATFORM_NAME = SupportedPlatforms.BINARY_SENSOR.value

    # Config keys (shared keys re-exported for this platform).
    CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
    CONF_SYNC_STATE = CONF_SYNC_STATE
    CONF_INVERT = CONF_INVERT
    CONF_IGNORE_INTERNAL_STATE = "ignore_internal_state"
    CONF_CONTEXT_TIMEOUT = "context_timeout"
    CONF_RESET_AFTER = CONF_RESET_AFTER

    DEFAULT_NAME = "KNX Binary Sensor"

    ENTITY_SCHEMA = vol.All(
        # deprecated since September 2020
        cv.deprecated("significant_bit"),
        cv.deprecated("automation"),
        vol.Schema(
            {
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
                vol.Optional(CONF_IGNORE_INTERNAL_STATE, default=False): cv.boolean,
                vol.Optional(CONF_INVERT, default=False): cv.boolean,
                vol.Required(CONF_STATE_ADDRESS): ga_list_validator,
                # Accepts 0-10 (seconds).
                vol.Optional(CONF_CONTEXT_TIMEOUT): vol.All(
                    vol.Coerce(float), vol.Range(min=0, max=10)
                ),
                vol.Optional(CONF_DEVICE_CLASS): vol.In(BINARY_SENSOR_DEVICE_CLASSES),
                vol.Optional(CONF_RESET_AFTER): cv.positive_float,
                vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
            }
        ),
    )
class ClimateSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX climate devices."""

    PLATFORM_NAME = SupportedPlatforms.CLIMATE.value

    # Group address config keys.
    CONF_ACTIVE_STATE_ADDRESS = "active_state_address"
    CONF_SETPOINT_SHIFT_ADDRESS = "setpoint_shift_address"
    CONF_SETPOINT_SHIFT_STATE_ADDRESS = "setpoint_shift_state_address"
    CONF_SETPOINT_SHIFT_MODE = "setpoint_shift_mode"
    CONF_SETPOINT_SHIFT_MAX = "setpoint_shift_max"
    CONF_SETPOINT_SHIFT_MIN = "setpoint_shift_min"
    CONF_TEMPERATURE_ADDRESS = "temperature_address"
    CONF_TEMPERATURE_STEP = "temperature_step"
    CONF_TARGET_TEMPERATURE_ADDRESS = "target_temperature_address"
    CONF_TARGET_TEMPERATURE_STATE_ADDRESS = "target_temperature_state_address"
    CONF_OPERATION_MODE_ADDRESS = "operation_mode_address"
    CONF_OPERATION_MODE_STATE_ADDRESS = "operation_mode_state_address"
    CONF_CONTROLLER_STATUS_ADDRESS = "controller_status_address"
    CONF_CONTROLLER_STATUS_STATE_ADDRESS = "controller_status_state_address"
    CONF_CONTROLLER_MODE_ADDRESS = "controller_mode_address"
    CONF_CONTROLLER_MODE_STATE_ADDRESS = "controller_mode_state_address"
    CONF_COMMAND_VALUE_STATE_ADDRESS = "command_value_state_address"
    CONF_HEAT_COOL_ADDRESS = "heat_cool_address"
    CONF_HEAT_COOL_STATE_ADDRESS = "heat_cool_state_address"
    CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = (
        "operation_mode_frost_protection_address"
    )
    CONF_OPERATION_MODE_NIGHT_ADDRESS = "operation_mode_night_address"
    CONF_OPERATION_MODE_COMFORT_ADDRESS = "operation_mode_comfort_address"
    CONF_OPERATION_MODE_STANDBY_ADDRESS = "operation_mode_standby_address"
    CONF_OPERATION_MODES = "operation_modes"
    CONF_CONTROLLER_MODES = "controller_modes"
    CONF_DEFAULT_CONTROLLER_MODE = "default_controller_mode"
    CONF_ON_OFF_ADDRESS = "on_off_address"
    CONF_ON_OFF_STATE_ADDRESS = "on_off_state_address"
    CONF_ON_OFF_INVERT = "on_off_invert"
    CONF_MIN_TEMP = "min_temp"
    CONF_MAX_TEMP = "max_temp"

    # Defaults.
    DEFAULT_NAME = "KNX Climate"
    DEFAULT_SETPOINT_SHIFT_MODE = "DPT6010"
    DEFAULT_SETPOINT_SHIFT_MAX = 6
    DEFAULT_SETPOINT_SHIFT_MIN = -6
    DEFAULT_TEMPERATURE_STEP = 0.1
    DEFAULT_ON_OFF_INVERT = False

    ENTITY_SCHEMA = vol.All(
        # deprecated since September 2020
        cv.deprecated("setpoint_shift_step", replacement_key=CONF_TEMPERATURE_STEP),
        # deprecated since 2021.6
        cv.deprecated("create_temperature_sensors"),
        vol.Schema(
            {
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(
                    CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX
                ): vol.All(int, vol.Range(min=0, max=32)),
                vol.Optional(
                    CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN
                ): vol.All(int, vol.Range(min=-32, max=0)),
                vol.Optional(
                    CONF_TEMPERATURE_STEP, default=DEFAULT_TEMPERATURE_STEP
                ): vol.All(float, vol.Range(min=0, max=2)),
                # Current and target temperature addresses are mandatory.
                vol.Required(CONF_TEMPERATURE_ADDRESS): ga_list_validator,
                vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): ga_list_validator,
                # setpoint_shift address and state address must come as a pair.
                vol.Inclusive(
                    CONF_SETPOINT_SHIFT_ADDRESS,
                    "setpoint_shift",
                    msg="'setpoint_shift_address' and 'setpoint_shift_state_address' "
                    "are required for setpoint_shift configuration",
                ): ga_list_validator,
                vol.Inclusive(
                    CONF_SETPOINT_SHIFT_STATE_ADDRESS,
                    "setpoint_shift",
                    msg="'setpoint_shift_address' and 'setpoint_shift_state_address' "
                    "are required for setpoint_shift configuration",
                ): ga_list_validator,
                vol.Optional(CONF_SETPOINT_SHIFT_MODE): vol.Maybe(
                    vol.All(vol.Upper, cv.enum(SetpointShiftMode))
                ),
                vol.Optional(CONF_ACTIVE_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_COMMAND_VALUE_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_OPERATION_MODE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): ga_list_validator,
                vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_HEAT_COOL_ADDRESS): ga_list_validator,
                vol.Optional(CONF_HEAT_COOL_STATE_ADDRESS): ga_list_validator,
                vol.Optional(
                    CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS
                ): ga_list_validator,
                vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): ga_list_validator,
                vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): ga_list_validator,
                vol.Optional(CONF_OPERATION_MODE_STANDBY_ADDRESS): ga_list_validator,
                vol.Optional(CONF_ON_OFF_ADDRESS): ga_list_validator,
                vol.Optional(CONF_ON_OFF_STATE_ADDRESS): ga_list_validator,
                vol.Optional(
                    CONF_ON_OFF_INVERT, default=DEFAULT_ON_OFF_INVERT
                ): cv.boolean,
                vol.Optional(CONF_OPERATION_MODES): vol.All(
                    cv.ensure_list, [vol.In(PRESET_MODES)]
                ),
                vol.Optional(CONF_CONTROLLER_MODES): vol.All(
                    cv.ensure_list, [vol.In(CONTROLLER_MODES)]
                ),
                vol.Optional(
                    CONF_DEFAULT_CONTROLLER_MODE, default=HVAC_MODE_HEAT
                ): vol.In(HVAC_MODES),
                vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
                vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
                vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
            }
        ),
    )
class CoverSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX covers."""

    PLATFORM_NAME = SupportedPlatforms.COVER.value

    # Config keys.
    CONF_MOVE_LONG_ADDRESS = "move_long_address"
    CONF_MOVE_SHORT_ADDRESS = "move_short_address"
    CONF_STOP_ADDRESS = "stop_address"
    CONF_POSITION_ADDRESS = "position_address"
    CONF_POSITION_STATE_ADDRESS = "position_state_address"
    CONF_ANGLE_ADDRESS = "angle_address"
    CONF_ANGLE_STATE_ADDRESS = "angle_state_address"
    CONF_TRAVELLING_TIME_DOWN = "travelling_time_down"
    CONF_TRAVELLING_TIME_UP = "travelling_time_up"
    CONF_INVERT_POSITION = "invert_position"
    CONF_INVERT_ANGLE = "invert_angle"

    DEFAULT_TRAVEL_TIME = 25
    DEFAULT_NAME = "KNX Cover"

    ENTITY_SCHEMA = vol.All(
        # First pass only enforces that at least one controlling address is
        # present; the second pass validates all keys.
        vol.Schema(
            {
                vol.Required(
                    vol.Any(CONF_MOVE_LONG_ADDRESS, CONF_POSITION_ADDRESS),
                    msg=f"At least one of '{CONF_MOVE_LONG_ADDRESS}' or '{CONF_POSITION_ADDRESS}' is required.",
                ): object,
            },
            extra=vol.ALLOW_EXTRA,
        ),
        vol.Schema(
            {
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_MOVE_LONG_ADDRESS): ga_list_validator,
                vol.Optional(CONF_MOVE_SHORT_ADDRESS): ga_list_validator,
                vol.Optional(CONF_STOP_ADDRESS): ga_list_validator,
                vol.Optional(CONF_POSITION_ADDRESS): ga_list_validator,
                vol.Optional(CONF_POSITION_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_ANGLE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_ANGLE_STATE_ADDRESS): ga_list_validator,
                vol.Optional(
                    CONF_TRAVELLING_TIME_DOWN, default=DEFAULT_TRAVEL_TIME
                ): cv.positive_float,
                vol.Optional(
                    CONF_TRAVELLING_TIME_UP, default=DEFAULT_TRAVEL_TIME
                ): cv.positive_float,
                vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
                vol.Optional(CONF_INVERT_ANGLE, default=False): cv.boolean,
                vol.Optional(CONF_DEVICE_CLASS): vol.In(COVER_DEVICE_CLASSES),
                vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
            }
        ),
    )
class ExposeSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX exposures."""

    PLATFORM_NAME = CONF_KNX_EXPOSE

    # Config keys.
    CONF_KNX_EXPOSE_TYPE = CONF_TYPE
    CONF_KNX_EXPOSE_ATTRIBUTE = "attribute"
    CONF_KNX_EXPOSE_BINARY = "binary"
    CONF_KNX_EXPOSE_DEFAULT = "default"

    # Special expose types that publish the local time instead of an entity.
    EXPOSE_TIME_TYPES: Final = [
        "time",
        "date",
        "datetime",
    ]

    EXPOSE_TIME_SCHEMA = vol.Schema(
        {
            vol.Required(CONF_KNX_EXPOSE_TYPE): vol.All(
                cv.string, str.lower, vol.In(EXPOSE_TIME_TYPES)
            ),
            vol.Required(KNX_ADDRESS): ga_validator,
        }
    )
    # Exposing an entity (or one of its attributes) to the bus requires an
    # entity_id and either 'binary' or a valid sensor type.
    EXPOSE_SENSOR_SCHEMA = vol.Schema(
        {
            vol.Required(CONF_KNX_EXPOSE_TYPE): vol.Any(
                CONF_KNX_EXPOSE_BINARY, sensor_type_validator
            ),
            vol.Required(KNX_ADDRESS): ga_validator,
            vol.Required(CONF_ENTITY_ID): cv.entity_id,
            vol.Optional(CONF_KNX_EXPOSE_ATTRIBUTE): cv.string,
            vol.Optional(CONF_KNX_EXPOSE_DEFAULT): cv.match_all,
        }
    )
    ENTITY_SCHEMA = vol.Any(EXPOSE_SENSOR_SCHEMA, EXPOSE_TIME_SCHEMA)
class FanSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX fans."""

    PLATFORM_NAME = SupportedPlatforms.FAN.value

    # Config keys.
    CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
    CONF_OSCILLATION_ADDRESS = "oscillation_address"
    CONF_OSCILLATION_STATE_ADDRESS = "oscillation_state_address"
    CONF_MAX_STEP = "max_step"

    DEFAULT_NAME = "KNX Fan"

    ENTITY_SCHEMA = vol.Schema(
        {
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Required(KNX_ADDRESS): ga_list_validator,
            vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
            vol.Optional(CONF_OSCILLATION_ADDRESS): ga_list_validator,
            vol.Optional(CONF_OSCILLATION_STATE_ADDRESS): ga_list_validator,
            # Accepts a single byte (0-255).
            vol.Optional(CONF_MAX_STEP): cv.byte,
            vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
        }
    )
class LightSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX lights."""

    PLATFORM_NAME = SupportedPlatforms.LIGHT.value

    # Group address config keys for the different light features.
    CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
    CONF_BRIGHTNESS_ADDRESS = "brightness_address"
    CONF_BRIGHTNESS_STATE_ADDRESS = "brightness_state_address"
    CONF_COLOR_ADDRESS = "color_address"
    CONF_COLOR_STATE_ADDRESS = "color_state_address"
    CONF_COLOR_TEMP_ADDRESS = "color_temperature_address"
    CONF_COLOR_TEMP_STATE_ADDRESS = "color_temperature_state_address"
    CONF_COLOR_TEMP_MODE = "color_temperature_mode"
    CONF_HUE_ADDRESS = "hue_address"
    CONF_HUE_STATE_ADDRESS = "hue_state_address"
    CONF_RGBW_ADDRESS = "rgbw_address"
    CONF_RGBW_STATE_ADDRESS = "rgbw_state_address"
    CONF_SATURATION_ADDRESS = "saturation_address"
    CONF_SATURATION_STATE_ADDRESS = "saturation_state_address"
    CONF_XYY_ADDRESS = "xyy_address"
    CONF_XYY_STATE_ADDRESS = "xyy_state_address"
    CONF_MIN_KELVIN = "min_kelvin"
    CONF_MAX_KELVIN = "max_kelvin"

    DEFAULT_NAME = "KNX Light"
    DEFAULT_COLOR_TEMP_MODE = "absolute"
    DEFAULT_MIN_KELVIN = 2700  # 370 mireds
    DEFAULT_MAX_KELVIN = 6000  # 166 mireds

    # Keys for per-channel (red/green/blue/white) configuration.
    CONF_INDIVIDUAL_COLORS = "individual_colors"
    CONF_RED = "red"
    CONF_GREEN = "green"
    CONF_BLUE = "blue"
    CONF_WHITE = "white"

    _hs_color_inclusion_msg = (
        "'hue_address', 'saturation_address' and 'brightness_address'"
        " are required for hs_color configuration"
    )

    # Address keys for hue/saturation based color control.
    HS_COLOR_SCHEMA = {
        vol.Optional(CONF_HUE_ADDRESS): ga_list_validator,
        vol.Optional(CONF_HUE_STATE_ADDRESS): ga_list_validator,
        vol.Optional(CONF_SATURATION_ADDRESS): ga_list_validator,
        vol.Optional(CONF_SATURATION_STATE_ADDRESS): ga_list_validator,
    }

    # Sub-schema for one color channel when individual_colors is used;
    # a brightness address is mandatory per channel.
    INDIVIDUAL_COLOR_SCHEMA = vol.Schema(
        {
            vol.Optional(KNX_ADDRESS): ga_list_validator,
            vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
            vol.Required(CONF_BRIGHTNESS_ADDRESS): ga_list_validator,
            vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): ga_list_validator,
        }
    )

    ENTITY_SCHEMA = vol.All(
        vol.Schema(
            {
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(KNX_ADDRESS): ga_list_validator,
                vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_BRIGHTNESS_ADDRESS): ga_list_validator,
                vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): ga_list_validator,
                # "color" is an Exclusive group: at most one of
                # individual_colors / color_address / rgbw_address /
                # xyy_address may be configured.
                vol.Exclusive(CONF_INDIVIDUAL_COLORS, "color"): {
                    vol.Inclusive(
                        CONF_RED,
                        "individual_colors",
                        msg="'red', 'green' and 'blue' are required for individual colors configuration",
                    ): INDIVIDUAL_COLOR_SCHEMA,
                    vol.Inclusive(
                        CONF_GREEN,
                        "individual_colors",
                        msg="'red', 'green' and 'blue' are required for individual colors configuration",
                    ): INDIVIDUAL_COLOR_SCHEMA,
                    vol.Inclusive(
                        CONF_BLUE,
                        "individual_colors",
                        msg="'red', 'green' and 'blue' are required for individual colors configuration",
                    ): INDIVIDUAL_COLOR_SCHEMA,
                    vol.Optional(CONF_WHITE): INDIVIDUAL_COLOR_SCHEMA,
                },
                vol.Exclusive(CONF_COLOR_ADDRESS, "color"): ga_list_validator,
                vol.Optional(CONF_COLOR_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_COLOR_TEMP_ADDRESS): ga_list_validator,
                vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): ga_list_validator,
                vol.Optional(
                    CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE
                ): vol.All(vol.Upper, cv.enum(ColorTempModes)),
                **HS_COLOR_SCHEMA,
                vol.Exclusive(CONF_RGBW_ADDRESS, "color"): ga_list_validator,
                vol.Optional(CONF_RGBW_STATE_ADDRESS): ga_list_validator,
                vol.Exclusive(CONF_XYY_ADDRESS, "color"): ga_list_validator,
                vol.Optional(CONF_XYY_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN): vol.All(
                    vol.Coerce(int), vol.Range(min=1)
                ),
                vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN): vol.All(
                    vol.Coerce(int), vol.Range(min=1)
                ),
                vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
            }
        ),
        # Cross-key check: at least one way to switch the light is required.
        vol.Any(
            vol.Schema(
                {vol.Required(KNX_ADDRESS): object},
                extra=vol.ALLOW_EXTRA,
            ),
            vol.Schema(  # brightness addresses are required in INDIVIDUAL_COLOR_SCHEMA
                {vol.Required(CONF_INDIVIDUAL_COLORS): object},
                extra=vol.ALLOW_EXTRA,
            ),
            msg="either 'address' or 'individual_colors' is required",
        ),
        # Cross-key check: hue/saturation/brightness are all-or-nothing.
        vol.Any(
            vol.Schema(  # 'brightness' is non-optional for hs-color
                {
                    vol.Inclusive(
                        CONF_BRIGHTNESS_ADDRESS, "hs_color", msg=_hs_color_inclusion_msg
                    ): object,
                    vol.Inclusive(
                        CONF_HUE_ADDRESS, "hs_color", msg=_hs_color_inclusion_msg
                    ): object,
                    vol.Inclusive(
                        CONF_SATURATION_ADDRESS, "hs_color", msg=_hs_color_inclusion_msg
                    ): object,
                },
                extra=vol.ALLOW_EXTRA,
            ),
            vol.Schema(  # hs-colors not used
                {
                    vol.Optional(CONF_HUE_ADDRESS): None,
                    vol.Optional(CONF_SATURATION_ADDRESS): None,
                },
                extra=vol.ALLOW_EXTRA,
            ),
            msg=_hs_color_inclusion_msg,
        ),
    )
class NotifySchema(KNXPlatformSchema):
    """Voluptuous schema for KNX notifications."""

    PLATFORM_NAME = SupportedPlatforms.NOTIFY.value

    DEFAULT_NAME = "KNX Notify"

    # A notify entity only needs a name and a single group address.
    ENTITY_SCHEMA = vol.Schema(
        {
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Required(KNX_ADDRESS): ga_validator,
        }
    )
class NumberSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX numbers."""

    PLATFORM_NAME = SupportedPlatforms.NUMBER.value

    CONF_MAX = "max"
    CONF_MIN = "min"
    CONF_STEP = "step"
    DEFAULT_NAME = "KNX Number"
    NUMBER_MODES: Final = [MODE_AUTO, MODE_BOX, MODE_SLIDER]

    ENTITY_SCHEMA = vol.All(
        vol.Schema(
            {
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_RESPOND_TO_READ, default=False): cv.boolean,
                vol.Optional(CONF_MODE, default=MODE_AUTO): vol.In(NUMBER_MODES),
                vol.Required(CONF_TYPE): numeric_type_validator,
                vol.Required(KNX_ADDRESS): ga_list_validator,
                vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_MAX): vol.Coerce(float),
                vol.Optional(CONF_MIN): vol.Coerce(float),
                vol.Optional(CONF_STEP): cv.positive_float,
                vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
            }
        ),
        # Second pass validates min/max/step against the DPT value range.
        number_limit_sub_validator,
    )
class SceneSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX scenes."""

    PLATFORM_NAME = SupportedPlatforms.SCENE.value

    CONF_SCENE_NUMBER = "scene_number"

    DEFAULT_NAME = "KNX SCENE"

    ENTITY_SCHEMA = vol.Schema(
        {
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Required(KNX_ADDRESS): ga_list_validator,
            # KNX scene numbers are limited to 1..64.
            vol.Required(CONF_SCENE_NUMBER): vol.All(
                vol.Coerce(int), vol.Range(min=1, max=64)
            ),
            vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
        }
    )
class SelectSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX selects."""

    PLATFORM_NAME = SupportedPlatforms.SELECT.value

    CONF_OPTION = "option"
    CONF_OPTIONS = "options"
    CONF_PAYLOAD = "payload"
    CONF_PAYLOAD_LENGTH = "payload_length"
    DEFAULT_NAME = "KNX Select"

    ENTITY_SCHEMA = vol.All(
        vol.Schema(
            {
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
                vol.Optional(CONF_RESPOND_TO_READ, default=False): cv.boolean,
                # Telegram payload length in bytes (0..14).
                vol.Required(CONF_PAYLOAD_LENGTH): vol.All(
                    vol.Coerce(int), vol.Range(min=0, max=14)
                ),
                # Each option maps a display string to a raw payload value.
                vol.Required(CONF_OPTIONS): [
                    {
                        vol.Required(CONF_OPTION): vol.Coerce(str),
                        vol.Required(CONF_PAYLOAD): cv.positive_int,
                    }
                ],
                vol.Required(KNX_ADDRESS): ga_list_validator,
                vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
            }
        ),
        # Second pass checks option payloads against payload_length.
        select_options_sub_validator,
    )
class SensorSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX sensors."""

    PLATFORM_NAME = SupportedPlatforms.SENSOR.value

    CONF_ALWAYS_CALLBACK = "always_callback"
    CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
    CONF_SYNC_STATE = CONF_SYNC_STATE
    DEFAULT_NAME = "KNX Sensor"

    ENTITY_SCHEMA = vol.Schema(
        {
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
            vol.Optional(CONF_ALWAYS_CALLBACK, default=False): cv.boolean,
            vol.Optional(CONF_STATE_CLASS): STATE_CLASSES_SCHEMA,
            vol.Required(CONF_TYPE): sensor_type_validator,
            # Sensors are read-only: only a state address is configured.
            vol.Required(CONF_STATE_ADDRESS): ga_list_validator,
            vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
        }
    )
class SwitchSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX switches."""

    PLATFORM_NAME = SupportedPlatforms.SWITCH.value

    CONF_INVERT = CONF_INVERT
    CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
    DEFAULT_NAME = "KNX Switch"

    ENTITY_SCHEMA = vol.Schema(
        {
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_INVERT, default=False): cv.boolean,
            vol.Optional(CONF_RESPOND_TO_READ, default=False): cv.boolean,
            vol.Required(KNX_ADDRESS): ga_list_validator,
            vol.Optional(CONF_STATE_ADDRESS): ga_list_validator,
            vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
        }
    )
class WeatherSchema(KNXPlatformSchema):
    """Voluptuous schema for KNX weather station."""

    PLATFORM_NAME = SupportedPlatforms.WEATHER.value

    CONF_SYNC_STATE = CONF_SYNC_STATE
    # One group address config key per supported weather measurement.
    CONF_KNX_TEMPERATURE_ADDRESS = "address_temperature"
    CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS = "address_brightness_south"
    CONF_KNX_BRIGHTNESS_EAST_ADDRESS = "address_brightness_east"
    CONF_KNX_BRIGHTNESS_WEST_ADDRESS = "address_brightness_west"
    CONF_KNX_BRIGHTNESS_NORTH_ADDRESS = "address_brightness_north"
    CONF_KNX_WIND_SPEED_ADDRESS = "address_wind_speed"
    CONF_KNX_WIND_BEARING_ADDRESS = "address_wind_bearing"
    CONF_KNX_RAIN_ALARM_ADDRESS = "address_rain_alarm"
    CONF_KNX_FROST_ALARM_ADDRESS = "address_frost_alarm"
    CONF_KNX_WIND_ALARM_ADDRESS = "address_wind_alarm"
    CONF_KNX_DAY_NIGHT_ADDRESS = "address_day_night"
    CONF_KNX_AIR_PRESSURE_ADDRESS = "address_air_pressure"
    CONF_KNX_HUMIDITY_ADDRESS = "address_humidity"

    DEFAULT_NAME = "KNX Weather Station"

    ENTITY_SCHEMA = vol.All(
        # deprecated since 2021.6
        cv.deprecated("create_sensors"),
        vol.Schema(
            {
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
                vol.Optional(CONF_SYNC_STATE, default=True): sync_state_validator,
                # Temperature is the only mandatory measurement.
                vol.Required(CONF_KNX_TEMPERATURE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_BRIGHTNESS_EAST_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_BRIGHTNESS_WEST_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_BRIGHTNESS_NORTH_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_WIND_SPEED_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_WIND_BEARING_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_RAIN_ALARM_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_FROST_ALARM_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_WIND_ALARM_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_DAY_NIGHT_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_AIR_PRESSURE_ADDRESS): ga_list_validator,
                vol.Optional(CONF_KNX_HUMIDITY_ADDRESS): ga_list_validator,
                vol.Optional(CONF_ENTITY_CATEGORY): ENTITY_CATEGORIES_SCHEMA,
            }
        ),
    )
|
|
import os
import sys

import xbmc, xbmcgui, mc
import ConfigParser

import common
# Subtitle provider plugins this script knows how to enable/disable.
available_providers = ['Addic7ed', 'BierDopje', 'OpenSubtitles', 'SubsWiki', 'Subtitulos', 'Undertexter']
# Set some default values for the subtitles handling
def register_defaults():
    """Push current settings into skin strings and seed config files on first run."""
    # Refresh the per-provider enabled/disabled skin strings for each section.
    subtitle_provider("get", "default")
    subtitle_provider("get", "tv")
    subtitle_provider("get", "movie")
    common.set_string("subtitles-plugin-language", get_subtitles_language_filter() )
    common.set_string("subtitles-plugin", get_subtitles_enabled() )
    common.set_string("featured-feed", get_featured_feed() )
    common.set_string("featured-name", get_featured_name() )
    # Strip the scheme so the skin shows a bare hostname/path.
    common.set_string("browser-homepage", "".join(get_browser_homepage().split("http://")) )
    # Seed the subtitles config file with defaults on first run.
    if not os.path.exists("/data/etc/.subtitles"):
        common.file_put_contents("/data/etc/.subtitles", """[DEFAULT]
lang = All
movieplugins = OpenSubtitles,Undertexter
tvplugins = BierDopje,OpenSubtitles,Addic7ed,Subtitulos,SubsWiki,Undertexter
plugins = BierDopje,OpenSubtitles,Subtitulos,SubsWiki,Addic7ed,Undertexter
[BierDopje]
key = C2FAFCBE34610608
""")
    set_home_enabled_strings()
    # Expose the installed BOXEE+ version to the skin when known.
    version_local = get_local_version()
    if version_local != "":
        common.set_string("boxeeplus-version", version_local )
def get_home_enabled_default_list():
    """Return the factory-default home-menu layout string."""
    default_layout = (
        "-,friends|Built-in,watchlater,shows|Built-in,movies|Built-in,"
        "music|Built-in,apps,files,web,photos,weather,clock"
    )
    return default_layout
def set_home_enabled_strings():
    """Publish per-item home-menu enabled/replacement skin strings."""
    for entry in get_home_enabled_default_list().split(","):
        name = entry.split("|")[0]
        common.set_string("homeenabled-%s" % name, get_homeenabled(name))
        common.set_string("home-%s-replacement" % name, get_homereplacement(name))
def get_jump_to_last_unwatched_value():
    """Return the "1"/"0" jump-to-last-unwatched flag (default "0")."""
    flag = common.file_get_contents("/data/etc/.jump_to_unwatched_enabled")
    return flag if flag != "" else "0"
def toggle_jump_to_last_unwatched():
    """Flip the jump-to-last-unwatched flag, persist it and update the skin."""
    new_value = "0" if get_jump_to_last_unwatched_value() == "1" else "1"
    common.file_put_contents("/data/etc/.jump_to_unwatched_enabled", new_value)
    common.set_string("jump-to-unwatched", new_value)
def get_homeenabled_value():
    """Return the persisted home-menu layout, falling back to the default list."""
    stored = common.file_get_contents("/data/etc/.home_enabled")
    if stored == "":
        stored = get_home_enabled_default_list()
    # Keep only the first line in case the file has a trailing newline.
    return stored.split("\n")[0]
def get_homereplacement(section):
    """Return the replacement app name for *section*, or "Off" when disabled."""
    result = ""
    for entry in get_homeenabled_value().split(","):
        parts = entry.split("|")
        if parts[0] == section:
            # Entries without an explicit replacement mean the built-in app.
            result = parts[1] if len(parts) > 1 else "Built-in"
    return result if result != "" else "Off"
def get_homeenabled(section):
    """Return "1" when *section* appears in the enabled home-menu list, else "0"."""
    wanted = "%s" % section
    for entry in get_homeenabled_value().split(","):
        if entry.split("|")[0] == wanted:
            return "1"
    return "0"
def toggle_homeenabled(section, action):
    """Toggle or cycle a home-menu item and persist the new layout.

    Sections that support replacement apps (friends/shows/movies/music)
    cycle through their available choices with action "next"/"previous";
    every other section is simply toggled on or off.
    """
    homeenabled = get_homeenabled_value().split(",")
    if section in ["friends","shows","movies","music"]:
        if section == "friends":
            types = ["Built-in", "Netflix", "Vudu", "Navi-X", "Spotify", "Grooveshark", "Pandora", "BBC iPlayer", "Revision3", "Crunchyroll", "Off"]
        if section == "shows":
            types = ["Built-in", "BBC iPlayer", "Revision3", "Crunchyroll", "Off"]
        if section == "movies":
            types = ["Built-in", "Netflix", "Vudu", "Navi-X", "Off"]
        if section == "music":
            types = ["Built-in", "Spotify", "Grooveshark", "Pandora", "Off"]
        replacement = get_homereplacement(section)
        # Iterate over a copy: the original code removed entries from the
        # list it was iterating, which can silently skip elements.
        for item in homeenabled[:]:
            itemname = item.split("|")[0]
            if itemname == section:
                homeenabled.remove(item)
        # Step to the next/previous replacement, wrapping at both ends.
        pos = types.index(replacement)
        if action == "next":
            pos = pos + 1
        if action == "previous":
            pos = pos - 1
        if pos >= len(types):
            pos = 0
        if pos < 0:
            pos = len(types) - 1
        if types[pos] != "Off":
            homeenabled.append("%s|%s" % (section, types[pos]))
    else:
        # Plain on/off toggle: remove the entry if present, add it otherwise.
        found = 0
        for item in homeenabled[:]:
            itemname = item.split("|")[0]
            if itemname == section:
                homeenabled.remove(item)
                found = 1
        if found == 0:
            homeenabled.append(section)
    common.file_put_contents("/data/etc/.home_enabled", ",".join(homeenabled))
    set_home_enabled_strings()
def get_browser_homepage():
    """Return the stored browser homepage URL, or the default when unset."""
    stored = common.file_get_contents("/data/etc/.browser_homepage")
    return stored if stored != "" else "http://www.myfav.es/boxee"
def set_browser_homepage():
    """Prompt for a browser homepage, persist it and rebuild the browser app."""
    homepage = get_browser_homepage()
    kb = xbmc.Keyboard('default', 'heading', True)
    kb.setDefault(homepage)
    kb.setHeading('Enter homepage URL') # optional
    kb.setHiddenInput(False) # optional
    kb.doModal()
    if kb.isConfirmed():
        homepage = kb.getText()
        common.file_put_contents("/data/etc/.browser_homepage", homepage)
        # Substitute the URL into the browser app template and re-register apps.
        template = common.file_get_contents("/data/hack/apps/browser2/template.xml")
        template = homepage.join(template.split("$URL$"))
        common.file_put_contents("/data/hack/apps/browser2/descriptor.xml", template)
        os.system("sh /data/hack/apps.sh")
        # Strip the scheme for display in the skin.
        common.set_string("browser-homepage", "".join(get_browser_homepage().split("http://")) )
# Set the password for the telnet functionality
def set_telnet_password():
    """Prompt for a telnet password and store it in /data/etc/passwd."""
    passwd = common.file_get_contents("/data/etc/passwd")
    kb = xbmc.Keyboard('default', 'heading', True)
    kb.setDefault(passwd) # optional
    kb.setHeading('Enter telnet password') # optional
    kb.setHiddenInput(True) # optional
    kb.doModal()
    if kb.isConfirmed():
        passwd = kb.getText()
        if passwd == "":
            # Refuse an empty password; telnet access would be unprotected.
            dialog = xbmcgui.Dialog()
            ok = dialog.ok('Telnet', 'The telnet password must not be empty.')
        else:
            common.file_put_contents("/data/etc/passwd", passwd)
# Set the username for youtube subscription
def set_youtube_sub():
    """Prompt for a YouTube username, persist it and update the skin string."""
    youtube = common.file_get_contents("/data/etc/youtube")
    yt = xbmc.Keyboard('default', 'heading', True)
    yt.setDefault(youtube) # optional
    yt.setHeading('Enter YouTube username') # optional
    yt.setHiddenInput(False) # optional
    yt.doModal()
    if yt.isConfirmed():
        you = yt.getText()
        if you == "":
            dialog = xbmcgui.Dialog()
            # Fixed typo in the user-facing message ("most" -> "must").
            ok = dialog.ok('YouTube', 'You must enter a username.')
        else:
            common.file_put_contents("/data/etc/youtube", you)
            xbmc.executebuiltin("Skin.SetString(youtube,%s)" % you )
# Determine whether the subtitle functionality is enabled or disabled
def get_subtitles_enabled():
    """Return the "1"/"0" flag indicating whether subtitle support is on."""
    flag = common.file_get_contents("/data/etc/.subtitles_enabled")
    return flag if flag != "" else "0"
def get_subtitles_language_filter():
    """Return "1" when a specific subtitle language is configured, "0" for All."""
    config = ConfigParser.SafeConfigParser({"lang": "All", "plugins" : "BierDopje,OpenSubtitles", "tvplugins" : "BierDopje,OpenSubtitles", "movieplugins" : "OpenSubtitles" })
    if os.path.exists("/data/etc/.subtitles"):
        config.read("/data/etc/.subtitles")
    configured_lang = config.get("DEFAULT", "lang").strip()
    return "0" if configured_lang in ("", "All") else "1"
def featured_next():
    """Advance the featured-feed selection by one, wrapping after the last feed."""
    current = int(get_featured_feed_value()) + 1
    if current > 5:
        current = 0
    common.file_put_contents("/data/etc/.replace_featured_enabled", "%s" % current)
    common.set_string("featured-feed", get_featured_feed() )
    common.set_string("featured-name", get_featured_name() )
def featured_previous():
    """Step the featured-feed selection back by one, wrapping before the first."""
    current = int(get_featured_feed_value()) - 1
    if current < 0:
        current = 5
    common.file_put_contents("/data/etc/.replace_featured_enabled", "%s" % current)
    common.set_string("featured-feed", get_featured_feed() )
    common.set_string("featured-name", get_featured_name() )
def get_featured_feed():
    """Return the feed URL matching the current featured-feed selection."""
    youtube = common.file_get_contents("/data/etc/youtube")
    feeds = {
        "1": "boxeedb://recent/?limit=15",
        "2": "rss://vimeo.com/channels/staffpicks/videos/rss",
        "3": "rss://gdata.youtube.com/feeds/api/standardfeeds/recently_featured?alt=rss",
        "4": "rss://gdata.youtube.com/feeds/api/users/" + youtube + "/newsubscriptionvideos?alt=rss",
        "5": "about:blank",
    }
    # Selection "0" (and anything unknown) falls back to the Boxee feed.
    return feeds.get(get_featured_feed_value(), "feed://featured/?limit=15")
def get_featured_name():
    """Return the human-readable name of the current featured-feed selection."""
    names = {
        "1": "Recently added",
        "2": "Vimeo staff picks",
        "3": "Youtube featured",
        "4": "Youtube subscription",
        "5": "Fanart",
    }
    return names.get(get_featured_feed_value(), "Boxee Featured")
def get_featured_feed_value():
    """Return the persisted featured-feed index as a string (default "0")."""
    stored = common.file_get_contents("/data/etc/.replace_featured_enabled")
    return stored if stored != "" else "0"
# Enable/disable the subtitle functionality
def toggle_subtitles(mode, current):
    """Toggle subtitle support or its language filter.

    mode:    "all" flips the global enable flag and re-runs the install
             script; "language" toggles the language-filter skin string,
             resetting the configured language to "All" when turning off.
    current: current skin-string value, checked before enabling the filter.
    """
    if mode == "all":
        subtitles = get_subtitles_enabled()
        if subtitles == "1":
            subtitles = "0"
        else:
            subtitles = "1"
        common.file_put_contents("/data/etc/.subtitles_enabled", subtitles)
        # (Re)apply the subtitle hook scripts after flipping the flag.
        os.system("sh /data/hack/subtitles.sh")
        common.set_string("subtitles-plugin", subtitles)
    if mode == "language":
        if get_subtitles_language_filter() == "0" and current != "1":
            common.set_string("subtitles-plugin-language","1")
        else:
            config = ConfigParser.SafeConfigParser({"lang": "All", "plugins" : "BierDopje,OpenSubtitles", "tvplugins" : "BierDopje,OpenSubtitles", "movieplugins" : "OpenSubtitles" })
            if os.path.exists("/data/etc/.subtitles"):
                config.read("/data/etc/.subtitles")
            # Reset the configured language back to "All" and persist it.
            config.set("DEFAULT", "lang", "All")
            if os.path.exists("/data/etc/.subtitles"):
                configfile = open("/data/etc/.subtitles", "w")
                config.write(configfile)
                configfile.close()
            common.set_string("subtitles-plugin-language","0")
# Edit the subtitle providers
def subtitle_provider(method, section, provider=None):
    """Query ("get") or toggle ("set") subtitle providers for a config section.

    section is "default", "tv" or "movie".  With method "get" and no
    provider, the skin strings for every known provider are refreshed;
    with a provider, 1/0 is returned for that provider.  With method
    "set" the provider is toggled and the config file rewritten.
    """
    config = ConfigParser.SafeConfigParser({"lang": "All", "plugins" : "BierDopje,OpenSubtitles", "tvplugins" : "BierDopje,OpenSubtitles", "movieplugins" : "OpenSubtitles" })
    if os.path.exists("/data/etc/.subtitles"):
        config.read("/data/etc/.subtitles")
    # Pick the plugin list / option name matching the requested section.
    plugins = config.get("DEFAULT", "plugins")
    plugin_section = "default"
    config_section = "plugins"
    if section == "tv":
        plugins = config.get("DEFAULT", "tvplugins")
        plugin_section = "tv"
        config_section = "tvplugins"
    if section == "movie":
        plugins = config.get("DEFAULT", "movieplugins")
        plugin_section = "movie"
        config_section = "movieplugins"
    enabled_providers = plugins.split(',')
    if method == "get":
        if provider != None:
            if provider in enabled_providers:
                return 1
            else:
                return 0
        for checkprovider in available_providers:
            result = 0
            if checkprovider in enabled_providers:
                result = 1
            # NOTE(review): passes an int here while all other set_string
            # calls pass "1"/"0" strings -- confirm common.set_string
            # accepts both types.
            common.set_string("subtitles-plugin-%s-%s" % (plugin_section, checkprovider), result)
    if method == "set":
        provider_status = 1
        if provider in enabled_providers:
            provider_status = 0
        if provider_status == 1:
            enabled_providers.append(provider)
            common.set_string("subtitles-plugin-%s-%s" % (plugin_section, provider), "1")
        else:
            enabled_providers.remove(provider)
            common.set_string("subtitles-plugin-%s-%s" % (plugin_section, provider), "0")
        # Persist the updated list; only written when the file already exists.
        config.set("DEFAULT", config_section, ",".join(enabled_providers).strip(','))
        if os.path.exists("/data/etc/.subtitles"):
            configfile = open("/data/etc/.subtitles", "w")
            config.write(configfile)
            configfile.close()
# Get the remote version number from github
def get_remote_version():
    """Fetch and return the latest published BOXEE+ version string."""
    import urllib2
    response = urllib2.urlopen('http://dl.boxeed.in/version')
    return "%s" % response.read()
# Get the version number for the locally installed version
def get_local_version():
    """Return the locally installed BOXEE+ version string ("" when unknown)."""
    return common.file_get_contents("/data/hack/version")
# Check for newer version
def check_new_version():
    """Compare local and remote BOXEE+ versions and offer an upgrade dialog.

    The original compared version components as strings, which both claims
    "9" > "10" and never matches when the remote file ends in a newline.
    Versions are now stripped and compared as integer tuples.
    """
    version_remote = get_remote_version().strip()
    version_local = get_local_version().strip()

    def version_tuple(version):
        # Turn "1.2.3" into (1, 2, 3); non-numeric components count as 0.
        parts = []
        for part in version.split("."):
            try:
                parts.append(int(part))
            except ValueError:
                parts.append(0)
        return tuple(parts)

    remote = version_tuple(version_remote)
    local = version_tuple(version_local)
    hasnew = remote > local
    issame = remote == local
    dialog = xbmcgui.Dialog()
    if hasnew:
        if dialog.yesno("BOXEE+HACKS Version", "A new version of BOXEE+ is available. Upgrade to %s now?" % (version_remote)):
            os.system("sh /data/hack/upgrade.sh")
    elif issame:
        dialog.ok("BOXEE+HACKS Version", "Your BOXEE+ version is up to date.")
    else:
        dialog.ok("BOXEE+HACKS Version", "Hi there Doc Brown. How's the future?")
def shutdown():
    """Power the box off via the system poweroff command."""
    os.system("poweroff")
def reboot():
    """Restart the box via the system reboot command."""
    os.system("reboot")
if (__name__ == "__main__"):
    # Command dispatch: first CLI argument selects the action, optional
    # further arguments are forwarded to the handler.
    import sys  # was referenced below but never imported -> NameError
    command = sys.argv[1]
    if command == "telnet": set_telnet_password()
    if command == "youtube": set_youtube_sub()
    if command == "subtitles": toggle_subtitles(sys.argv[2], sys.argv[3])
    if command == "version": check_new_version()
    if command == "defaults": register_defaults()
    if command == "subtitles-provider": subtitle_provider("set", sys.argv[2], sys.argv[3])
    if command == "featured_next": featured_next()
    if command == "featured_previous": featured_previous()
    if len(sys.argv) == 4:
        if command == "homeenabled": toggle_homeenabled(sys.argv[2], sys.argv[3])
    else:
        if command == "homeenabled": toggle_homeenabled(sys.argv[2], "")
    if command == "browser-homepage": set_browser_homepage()
    if command == "toggle-jump-to-last-unwatched": toggle_jump_to_last_unwatched()
    if command == "shutdown": shutdown()
    if command == "reboot": reboot()
|
|
#----------------------------------------------------------------------
# Copyright (c) 2016 Inria/iMinds by Arthur Garnier
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
The GPO Reference Aggregate Manager v3, showing how to implement
the GENI AM API version 3. This AggregateManager has only fake resources.
Invoked from gcf-am.py
The GENI AM API is defined in the AggregateManager class.
"""
# Note: This AM uses SFA authorization to check that the caller
# has appropriate credentials to make the call. If this AM is used in
# conjunction with the policy-based authorization capability (in gcf.geni.auth)
# then this code needs to only extract expiration times from the credentials
# which can be done using the gcf.sfa.credential module
from __future__ import absolute_import
import sys
from extendedresource import ExtendedResource, FIXED_PROXY_USER
from terms_conditions.terms_conditions_helper import TermsAndConditionsHelper
from terms_conditions.terms_conditions_site_request_handler import SecureXMLRPCAndTermsAndConditionsSiteRequestHandler
sys.path.insert(1, '../geni-tools/src')
import base64
import collections
import datetime
import os
import traceback
import xml.dom.minidom as minidom
import zlib
import ConfigParser
import threading
import time
import gcf.geni.am.am3 as am3
import pickle
import Pyro4
from StringIO import StringIO
from lxml import etree
from gcf.geni.am.aggregate import Aggregate
from dockermaster import DockerMaster
from gcf_to_docker import DockerManager
from gcf.geni.util.urn_util import publicid_to_urn
from gcf.geni.util import urn_util as urn
from gcf.sfa.trust.credential import Credential
from gcf.gcf_version import GCF_VERSION
from gcf.geni.auth.base_authorizer import *
from gcf.geni.am.api_error_exception import ApiErrorException
from shutil import copyfile
# See sfa/trust/rights.py
# These are names of operations
# from the rights.py privilege_table
# Credentials may list privileges that
# map to these operations, giving the caller permission
# to perform the functions
RENEWSLIVERPRIV = am3.RENEWSLIVERPRIV

# Map the Allocate, Provision and POA calls to the CreateSliver privilege.
ALLOCATE_PRIV = am3.ALLOCATE_PRIV
PROVISION_PRIV = am3.PROVISION_PRIV
PERFORM_ACTION_PRIV = am3.PERFORM_ACTION_PRIV
DELETESLIVERPRIV = am3.DELETESLIVERPRIV
SLIVERSTATUSPRIV = am3.SLIVERSTATUSPRIV
SHUTDOWNSLIVERPRIV = am3.SHUTDOWNSLIVERPRIV

# Publicid format resource namespace. EG Resource URNs
# will be <namespace>:resource:<resourcetype>_<resourceid>
# This is something like the name of your AM
# See gen-certs.CERT_AUTHORITY
RESOURCE_NAMESPACE = am3.RESOURCE_NAMESPACE

# MAX LEASE is 7 days (arbitrarily)
REFAM_MAXLEASE_MINUTES = am3.REFAM_MAXLEASE_MINUTES

# Expiration on Allocated resources is 10 minutes.
ALLOCATE_EXPIRATION_SECONDS = am3.ALLOCATE_EXPIRATION_SECONDS

# GENI Allocation States
STATE_GENI_UNALLOCATED = am3.STATE_GENI_UNALLOCATED
STATE_GENI_ALLOCATED = am3.STATE_GENI_ALLOCATED
STATE_GENI_PROVISIONED = am3.STATE_GENI_PROVISIONED

# GENI Operational States
# These are in effect when the allocation state is PROVISIONED.
OPSTATE_GENI_PENDING_ALLOCATION = am3.OPSTATE_GENI_PENDING_ALLOCATION
OPSTATE_GENI_NOT_READY = am3.OPSTATE_GENI_NOT_READY
OPSTATE_GENI_CONFIGURING = am3.OPSTATE_GENI_CONFIGURING
OPSTATE_GENI_STOPPING = am3.OPSTATE_GENI_STOPPING
OPSTATE_GENI_READY = am3.OPSTATE_GENI_READY
OPSTATE_GENI_READY_BUSY = am3.OPSTATE_GENI_READY_BUSY
OPSTATE_GENI_FAILED = am3.OPSTATE_GENI_FAILED

# Module-level locks serialising sliver expiry, state dumps and allocation.
EXPIRE_LOCK = threading.Lock()
DUMP_LOCK = threading.Lock()
ALLOCATE_LOCK = threading.Lock()

RSPEC_V3_NAMESPACE_URI = "http://www.geni.net/resources/rspec/3"

# increment CODE_VERSION whenever changing something that impacts the stored data
STATE_CODE_VERSION = '1'
STATE_FILENAME = 'am-state-v{}.dat'.format(STATE_CODE_VERSION)
class DockerAggregateManager(am3.ReferenceAggregateManager):
def __init__(self, root_cert, urn_authority, url, **kwargs):
"""
Create a testbed AggregateManager ("AM"), which supports docker containers and simple raw resources
:param root_cert: is a single cert or dir of multiple certs that are trusted to sign credentials
:type root_cert: ?
:param urn_authority: the tla/tla part of the authority urn. For "urn:publicid:IDN+example.com+authority+am" this must be "example.com"
:type urn_authority: string
:param url: the URL at which the AM runs.
:type url: string
:param kwargs:
"""
super(DockerAggregateManager,self).__init__(root_cert,urn_authority,url,**kwargs)
self._hrn = urn_authority
self._urn_authority = "IDN "+urn_authority
self._my_urn = publicid_to_urn("%s %s %s" % (self._urn_authority, 'authority', 'am'))
self.DockerManager = DockerManager()
self.proxy_dockermaster = None
self.terms_and_conditions_site_enabled = False
self.disallow_users_if_terms_and_conditions_not_accepted = False
thread_sliver_daemon = threading.Thread(target=self.expireSliversDaemon)
thread_sliver_daemon.daemon=True
thread_sliver_daemon.start()
try:
self.logger.info("Restoring AM state from \"{}\"...".format(STATE_FILENAME))
s=open(STATE_FILENAME, 'rb')
p = pickle.Unpickler(s)
self._agg = p.load()
self._slices = p.load()
self.proxy_dockermaster = p.load()
self.public_url = p.load()
self.terms_and_conditions_site_enabled = p.load()
self.disallow_users_if_terms_and_conditions_not_accepted = p.load()
s.close()
except Exception as e:
self.logger.info(str(e))
self.logger.info("Restoring AM state FAILED: Loading new instance...")
self._agg = Aggregate()
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.abspath(__file__))+"/docker_am_config")
self.public_url = None
for r in config.sections():
def config_fetch(option_name, if_missing=None):
if not config.has_option(r, option_name) \
or config.get(r, option_name) is None \
or config.get(r, option_name) == "":
return if_missing
else:
return config.get(r, option_name)
def config_fetch_bool(option_name, if_missing=None):
res = config_fetch(option_name, if_missing)
if res is None:
return res
return str(res).lower() in ("yes", "true", "t", "1")
if r == 'general':
self.public_url = config_fetch("public_url")
self.terms_and_conditions_site_enabled = \
config_fetch_bool("terms_and_conditions_site_enabled", if_missing=False)
self.disallow_users_if_terms_and_conditions_not_accepted = \
config_fetch_bool("disallow_users_if_terms_and_conditions_not_accepted", if_missing=False)
pass
else:
#Both proxy and resources need a dockermanager config
if config_fetch("dockermaster_pyro4_host") is None:
# No host specified, so a new object is created locally,
# which means docker runs on the local host instead of on a remote host
dockermanager = DockerManager()
else:
# Host specified, so also use a DockerManager object,
# but use PYRO to use one on a remote host instead of a local one.
# This means that this uses docker on a remote host
uri = "PYRO:dockermanager@" + config.get(r, "dockermaster_pyro4_host") + ":" + config.get(r, "dockermaster_pyro4_port")
dockermanager = Pyro4.Proxy(uri)
dockermanager._pyroHmacKey = config.get(r, "dockermaster_pyro4_password")
if r.startswith("proxy"):
if self.proxy_dockermaster is not None:
raise Exception('Only 1 proxy section in config is supported')
# proxy_type meaning:
# slice means one new proxy per slice.
# global means one fixed proxy.
proxy_type = config_fetch("type")
if proxy_type is None or not proxy_type in [ 'global', 'slice' ]:
raise Exception("Invalid config: none or unknown proxy type specified in section \"{}\"".format(r))
if proxy_type == 'global':
raise Exception(
"Valid proxy type {} specified in section \"{}\" is not yet supported".format(proxy_type,r))
if proxy_type == 'slice':
self.proxy_dockermaster = DockerMaster(int(config.get(r, "max_containers", 20)),
config_fetch('node_ipv4_hostname'),
None, #no ipv6 prefix
int(config_fetch('starting_ipv4_port', '2222')),
dockermanager)
pass
else:
self._agg.add_resources([DockerMaster(int(config_fetch("max_containers", 20)),
config_fetch("node_ipv4_hostname"),
config_fetch("ipv6_prefix"),
int(config_fetch('starting_ipv4_port', '12000')),
dockermanager)])
#Here you can add the example resource. (You have to delete STATE_FILENAME to reload resources)
#self._agg.add_resources([ResourceExample(str(uuid.uuid4()), "127.0.0.1")])
self.dumpState()
if self.public_url is None:
self.public_url = self._url
self.logger.warn("Warning: no public_url in docker_am_config. Will use '%s' as URL", self.public_url)
if self.terms_and_conditions_site_enabled:
# Make the XML-RPC server also serve some generic HTTP requests (used to serve terms_conditions site)
self.logger.info("Enabling Terms and Conditions site")
self.custom_request_handler_class = SecureXMLRPCAndTermsAndConditionsSiteRequestHandler
self.logger.info("Running %s AM v%d code version %s", self._am_type, self._api_version, GCF_VERSION)
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def ListResources(self, credentials, options):
    '''Return an RSpec of resources managed at this AM.
    If geni_available is specified in the options,
    then only report available resources. If geni_compressed
    option is specified, then compress the result.'''
    self.logger.info('ListResources(%r)' % (options))
    self.expire_slivers()
    # An empty privilege tuple: any single valid credential is enough to
    # read the advertisement (the API semantics are unclear here, so this
    # is just the current implementation).
    self.getVerifiedCredentials(None, credentials, options, ())
    # geni_rspec_version (with both a type and a version field) is a
    # required option; reject the call with BAD_ARGS otherwise.
    if 'geni_rspec_version' not in options:
        self.logger.error('No geni_rspec_version supplied to ListResources.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version was not supplied.')
    version_option = options['geni_rspec_version']
    if 'type' not in version_option:
        self.logger.error('ListResources: geni_rspec_version does not contain a type field.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version does not have a type field.')
    if 'version' not in version_option:
        self.logger.error('ListResources: geni_rspec_version does not contain a version field.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version does not have a version field.')
    # Only GENI v3 advertisements are supported.
    requested_type = version_option['type']
    if isinstance(requested_type, basestring):
        requested_type = requested_type.lower().strip()
    requested_version = version_option['version']
    if requested_type != 'geni':
        self.logger.error('ListResources: Unknown RSpec type %s requested', requested_type)
        return self.errorResult(am3.AM_API.BAD_VERSION,
                                'Bad Version: requested RSpec type %s is not a valid option.' % (requested_type))
    if requested_version != '3':
        self.logger.error('ListResources: Unknown RSpec version %s requested', requested_version)
        return self.errorResult(am3.AM_API.BAD_VERSION,
                                'Bad Version: requested RSpec version %s is not a valid option.' % (requested_version))
    self.logger.info("ListResources requested RSpec %s (%s)", requested_type, requested_version)
    # Slice-scoped listing moved to Describe in APIv3.
    if 'geni_slice_urn' in options:
        self.logger.error('ListResources: geni_slice_urn is no longer a supported option.')
        return self.errorResult(
            am3.AM_API.BAD_ARGS,
            'Bad Arguments:'
            'option geni_slice_urn is no longer a supported option.'
            ' Use "Describe" instead.')
    # Build the advertisement from the full catalog, optionally keeping
    # only the resources that are currently available.
    only_available = 'geni_available' in options and options['geni_available']
    adv_header = self.advert_header()
    for res in self._agg.catalog(None):
        if only_available and not res.available:
            continue
        adv_header.append(res.genAdvertNode(self._urn_authority, self._my_urn))
    result = etree.tostring(adv_header, pretty_print=True, xml_declaration=True, encoding='utf-8')
    # Optionally compress the result
    if 'geni_compressed' in options and options['geni_compressed']:
        try:
            result = base64.b64encode(zlib.compress(result))
        except Exception as exc:
            self.logger.error("Error compressing and encoding resource list: %s", traceback.format_exc())
            raise Exception("Server error compressing resource list", exc)
    return self.successResult(result)
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def Allocate(self, slice_urn, credentials, rspec, options):
    """Allocate slivers to the given slice according to the given RSpec.
    Return an RSpec of the actually allocated resources.

    :param slice_urn: the slice in which to allocate the resources
    :type slice_urn: string
    :param rspec: the request RSpec
    :type rspec: string
    :param credentials: the credential(s) that authorise the caller to use the slice specified in slice_urn
    :type credentials: list of dict
    :param options: optional parameters for the Allocate call
    :type options: dict
    """
    self.logger.info('Allocate(%r)' % (slice_urn))
    self.expire_slivers()
    # Note this list of privileges is really the name of an operation
    # from the privilege_table in sfa/trust/rights.py
    privileges = (ALLOCATE_PRIV,)
    creds = self.getVerifiedCredentials(slice_urn, credentials, options, privileges)
    # If we get here, the credentials give the caller
    # all needed privileges to act on the given target.
    # Grab the user_urn
    user_urn = gid.GID(string=options['geni_true_caller_cert']).get_urn()
    # Optionally refuse users that have not accepted the T&C.
    if self.disallow_users_if_terms_and_conditions_not_accepted:
        testbed_access_ok = TermsAndConditionsHelper.get().has_testbed_access(user_urn)
        if not testbed_access_ok:
            self.logger.error("Cannot create sliver. No testbed access for user '%s'" % user_urn)
            return self.errorResult(am3.AM_API.REFUSED,
                                    '[T&C-APPROVAL-MISSING] '
                                    'Approval of the Terms & Conditions is required in order to use this testbed. '
                                    'Please visit '+self.public_url+'terms_conditions/index.html')
    rspec_dom = None
    try:
        rspec_dom = minidom.parseString(rspec)
        # TODO: remember to call rspec_dom.unlink() once no part of it is
        # still needed. This speeds up freeing its memory.
    except Exception as exc:
        self.logger.error("Cannot create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Args: RSpec is unparseable')
    # Allow only a GENI v3 request RSpec; the expected root node is:
    # <rspec type="request" xmlns="http://www.geni.net/resources/rspec/3">
    rspec_element = rspec_dom.documentElement  # type : Element
    if rspec_element is None:
        return self.errorResult(am3.AM_API.BAD_ARGS, 'Bad Args: no root RSpec element found')
    if not rspec_element.hasAttribute("type"):
        return self.errorResult(am3.AM_API.BAD_ARGS, 'Bad Args: rspec element has no "type" argument')
    if rspec_element.getAttribute("type") != "request":
        return self.errorResult(am3.AM_API.BAD_ARGS, 'Bad Args: rspec element has type "'+rspec_element.getAttribute("type")+'" instead of "request"')
    # NOTE(review): every explicit exit path below releases ALLOCATE_LOCK,
    # but an unexpected exception would leak it -- a try/finally would be
    # safer; left unchanged here to preserve the exact control flow.
    ALLOCATE_LOCK.acquire()
    available = self.resources(available=True)
    # Try the biggest resources first.
    available = sorted(available, key=lambda a: a.size(), reverse=True)
    # Note: We only care about nodes for this component manager.
    # Nodes without component_manager_id or with a component_manager_id
    # of another AM are ignored.
    local_nodes = list()
    for node_elem in rspec_dom.documentElement.getElementsByTagName('node'):
        if node_elem.getAttribute("component_manager_id") == self._my_urn:
            local_nodes.append(node_elem)
    # If there are no nodes in the RSpec that we should handle, the AM
    # specification does not say what to do; SEARCH_FAILED is clearer
    # than silently doing nothing.
    if len(local_nodes) == 0:
        ALLOCATE_LOCK.release()
        return self.errorResult(am3.AM_API.SEARCH_FAILED, "No requested resource can be allocated on this AM. "
                                                          "Check your request (usually bad component_manager_id)")
    resources = list()
    images_to_delete = list()

    def abort_resource_allocation():
        # Roll back every allocation made so far and release the lock.
        for r in resources:
            r.deallocate()
        ALLOCATE_LOCK.release()

    # If a proxy dockermaster is configured, allocate one proxy container
    # that all slivers of this slice will share.
    proxy_resource = None
    if self.proxy_dockermaster is not None:
        proxy_resource = self.proxy_dockermaster.matchResource()
        if proxy_resource is None:
            abort_resource_allocation()
            return self.errorResult(am3.AM_API.TOO_BIG, 'Too Big: insufficient resources to fulfill request (not enough resources to create proxy)')
        proxy_resource.is_proxy = True
        proxy_resource.external_id = None
        proxy_resource.available = False
        proxy_resource.chosen_sliver_type = 'docker-container'
        proxy_resource.image = None
        resources.append(proxy_resource)
    for node_elem in local_nodes:
        client_id = node_elem.getAttribute('client_id')
        if client_id == "" or client_id is None:
            abort_resource_allocation()
            return self.errorResult(am3.AM_API.BAD_ARGS, "A node does not have a client_id")
        if len(node_elem.getElementsByTagName('sliver_type')) < 1:
            abort_resource_allocation()
            return self.errorResult(am3.AM_API.BAD_ARGS,
                                    "The node '{}' does not have a sliver_type".format(client_id))
        sliver_type = node_elem.getElementsByTagName('sliver_type')[0]
        image = None
        if sliver_type != "":
            # Optional disk_image child selects the container image.
            if len(sliver_type.getElementsByTagName("disk_image")) == 1:
                image = sliver_type.getElementsByTagName("disk_image")[0].getAttribute("name")
                images_to_delete.append(image)
            sliver_type = sliver_type.getAttribute('name')
        # note: basestring handles both str and unicode
        if sliver_type is None \
                or not isinstance(sliver_type, basestring) \
                or sliver_type == "":
            self.logger.info('Bad sliver_type="%s" (%r) (type=%s)', sliver_type, sliver_type, type(sliver_type))
            abort_resource_allocation()
            return self.errorResult(am3.AM_API.BAD_ARGS,
                                    "The node '{}' does not have a valid sliver_type".format(client_id))
        self.logger.info('Checking node with sliver_type="%s"', sliver_type)
        component_id = node_elem.getAttribute('component_id')
        if component_id == "":
            component_id = None
        exclusive = node_elem.getAttribute('exclusive')  # type : string
        if exclusive == "":
            exclusive = None
        if exclusive is not None:
            exclusive = exclusive.lower() in ['true', '1', 't', 'y', 'yes']
        # Find the first available resource that matches the request.
        resource = None  # type: ExtendedResource
        for r in available:
            resource = r.matchResource(sliver_type, component_id, exclusive)
            if resource is None:
                # Search next available resource
                continue
            else:
                # resource found
                if component_id is not None and (resource.id != component_id):
                    abort_resource_allocation()
                    return self.errorResult(5,  # 5 = SERVERERROR
                                            "Server ERROR: {} != {}".format(resource.id, component_id))
                try:
                    # Resources returned by a resource pool are not listed
                    # in the "available" list, so a failed remove is fine.
                    # BUGFIX: narrowed the bare except to ValueError (the
                    # only exception list.remove raises here).
                    available.remove(resource)
                except ValueError:
                    pass
                break
        if resource is None:  # There aren't enough resources
            self.logger.error('Too big: not enought %s available', sliver_type)
            abort_resource_allocation()
            return self.errorResult(am3.AM_API.TOO_BIG, 'Too Big: insufficient resources to fulfill request')
        resource.external_id = client_id
        resource.available = False
        resource.chosen_sliver_type = sliver_type
        resource.image = image
        resource.proxy_resource = proxy_resource
        resources.append(resource)
    ALLOCATE_LOCK.release()
    # determine the start time as bounded by slice expiration and 'now'
    now = datetime.datetime.utcnow()
    start_time = now
    if 'geni_start_time' in options:
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                "geni_start_time is not supported")
    # determine max expiration time from credentials:
    # do not create a sliver that will outlive the slice!
    expiration = self.min_expire(creds, self.max_alloc,
                                 ('geni_end_time' in options
                                  and options['geni_end_time']))
    # determine end time as min of the slice
    # and the requested time (if any)
    end_time = self.min_expire(creds, None,
                               ('geni_end_time' in options
                                and options['geni_end_time']))
    # If the slice exists, accept only if no existing sliver overlaps
    # with the requested start/end time. If it doesn't exist, create it.
    if slice_urn in self._slices:
        newslice = self._slices[slice_urn]
        one_slice_overlaps = False
        for sliver in newslice.slivers():
            if sliver.startTime() < end_time and \
                    sliver.endTime() > start_time:
                one_slice_overlaps = True
                break
        if one_slice_overlaps:
            ALLOCATE_LOCK.acquire()
            for sliver in newslice.slivers():
                sliver.resource().deallocate()
            ALLOCATE_LOCK.release()
            template = "Slice %s already has slivers"
            self.logger.error(template % (slice_urn))
            return self.errorResult(am3.AM_API.ALREADY_EXISTS,
                                    template % (slice_urn))
    else:
        newslice = Slice(slice_urn)
    for resource in resources:
        sliver = newslice.add_resource(resource)
        if resource.image is not None:
            # Namespace the image under the slice.
            resource.image = slice_urn + "::" + resource.image
        sliver.setExpiration(expiration)
        sliver.setStartTime(start_time)
        sliver.setEndTime(end_time)
        sliver.setAllocationState(STATE_GENI_ALLOCATED)
    for i in images_to_delete:
        # Remember downloaded (http/https) images so they can be removed
        # when the slice is deleted.
        # BUGFIX: the original condition lacked parentheses; `and` binds
        # tighter than `or`, so http:// images skipped the duplicate check
        # and could be appended multiple times.
        if (i.startswith("http://") or i.startswith("https://")) and i not in newslice.images_to_delete:
            newslice.images_to_delete.append(i)
    # Register the slivers under both the slice and the user.
    self._agg.allocate(slice_urn, newslice.resources())
    self._agg.allocate(user_urn, newslice.resources())
    newslice.request_rspec = rspec
    self._slices[slice_urn] = newslice
    # Log the allocation
    self.logger.info("Allocated new slice %s" % slice_urn)
    for sliver in newslice.slivers():
        self.logger.info("Allocated resource %s to slice %s as sliver %s",
                         sliver.resource().id, slice_urn, sliver.urn())
    manifest = self.manifest_rspec(slice_urn)
    self.dumpState()
    result = dict(geni_rspec=manifest,
                  geni_slivers=[s.status() for s in newslice.slivers()])
    return self.successResult(result)
def provision_install_execute_sliver(self, the_slice, sliver):
    """Provision a sliver, wait for SSH, then run the install and execute
    services declared for its node in the request RSpec.

    Runs on a background thread (started from Provision and from the
    geni_reload action). Updates the sliver's operational state as it
    progresses and persists state via dumpState().
    """
    def getXmlNode(client_id, manifest=the_slice.request_rspec):
        # Find the <node> element with the given client_id in the request RSpec.
        assert the_slice is not None
        assert manifest is not None
        for node in etree.parse(StringIO(manifest)).getroot().getchildren():
            if node.get("client_id") == client_id:
                return node
        return None

    def getServiceInstall(etreeNode):
        # Return [url, install_path] pairs from <services><install> elements.
        ns = "{" + etreeNode.nsmap.get(None) + "}"
        services = etreeNode.find(ns + "services")
        if services is None:
            return []
        ret = list()
        for install in services.findall(ns + 'install'):
            ret.append([install.get('url'), install.get('install_path')])
        return ret

    def getServiceExecute(etreeNode):
        # Return [shell, command] pairs from <services><execute> elements.
        ns = "{" + etreeNode.nsmap.get(None) + "}"
        services = etreeNode.find(ns + "services")
        if services is None:
            return []
        ret = list()
        for execute in services.findall(ns + 'execute'):
            ret.append([execute.get('shell'), execute.get('command')])
        return ret

    if sliver.resource().provision() is not True:
        sliver.setOperationalState(OPSTATE_GENI_FAILED)
        sliver.resource().deprovision()
        return
    if sliver.resource().waitForSshConnection() is not True:
        sliver.setOperationalState(OPSTATE_GENI_FAILED)
        sliver.resource().deprovision()
        return
    sliver.setOperationalState(OPSTATE_GENI_READY_BUSY)
    self.dumpState()
    client_id = sliver.resource().external_id
    if client_id is not None:
        assert isinstance(client_id, basestring)
        node_xml = getXmlNode(client_id)
        assert node_xml is not None
        install_failed = False
        for url, path in getServiceInstall(node_xml):
            ret = sliver.resource().installCommand(url, path)
            if ret is not True:
                # BUGFIX: a failed install used to be silently overwritten
                # by OPSTATE_GENI_READY after the loop; keep FAILED sticky.
                install_failed = True
                sliver.setOperationalState(OPSTATE_GENI_FAILED)
                sliver.resource().error = ret
            else:
                sliver.resource().error = ""
            self.dumpState()
        if not install_failed:
            sliver.setOperationalState(OPSTATE_GENI_READY)
        # Execute services are still started, as in the original code.
        for shell, command in getServiceExecute(node_xml):
            sliver.resource().executeCommand(shell, command)
    else:
        # No client_id: nothing to install/execute (e.g. proxy container).
        sliver.setOperationalState(OPSTATE_GENI_READY)
def Provision(self, urns, credentials, options):
    """Provision the allocated slivers identified by urns: configure the
    requested users/SSH keys and start the containers (the slow part runs
    on background threads). Return the manifest RSpec and sliver statuses.
    """
    self.logger.info('Provision(%r)' % (urns))
    self.expire_slivers()
    the_slice, slivers = self.decode_urns(urns)
    # Note this list of privileges is really the name of an operation
    # from the privilege_table in sfa/trust/rights.py
    privileges = (PROVISION_PRIV,)
    creds = self.getVerifiedCredentials(the_slice.urn, credentials, options, privileges)
    # geni_rspec_version (with type and version fields) is a required option.
    if 'geni_rspec_version' not in options:
        self.logger.error('No geni_rspec_version supplied to Provision.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version was not supplied.')
    if 'type' not in options['geni_rspec_version']:
        self.logger.error('Provision: geni_rspec_version does not contain a type field.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version does not have a type field.')
    if 'version' not in options['geni_rspec_version']:
        self.logger.error('Provision: geni_rspec_version does not contain a version field.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version does not have a version field.')
    # Only GENI v3 is supported.
    rspec_type = options['geni_rspec_version']['type']
    if isinstance(rspec_type, basestring):
        rspec_type = rspec_type.lower().strip()
    rspec_version = options['geni_rspec_version']['version']
    if rspec_type != 'geni':
        self.logger.error('Provision: Unknown RSpec type %s requested', rspec_type)
        return self.errorResult(am3.AM_API.BAD_VERSION,
                                'Bad Version: requested RSpec type %s is not a valid option.' % (rspec_type))
    if rspec_version != '3':
        self.logger.error('Provision: Unknown RSpec version %s requested', rspec_version)
        return self.errorResult(am3.AM_API.BAD_VERSION,
                                'Bad Version: requested RSpec version %s is not a valid option.' % (rspec_version))
    self.logger.info("Provision requested RSpec %s (%s)", rspec_type, rspec_version)
    # Only provision slivers that are in the scheduled time frame
    now = datetime.datetime.utcnow()
    provisionable_slivers = \
        [sliver for sliver in slivers
         if now >= sliver.startTime() and now <= sliver.endTime()]
    slivers = provisionable_slivers
    if len(slivers) == 0:
        return self.errorResult(am3.AM_API.UNAVAILABLE,
                                "No slivers available to provision at this time")
    # BUGFIX: parse and validate the user SSH keys *before* mutating any
    # sliver state. Previously the slivers were already re-scheduled and
    # marked PROVISIONED before this check ran, so a request without keys
    # failed but still left side effects behind.
    user_keys_dict = dict()
    if 'geni_users' in options:
        for user in options['geni_users']:
            if 'keys' in user and len(user['keys']) > 0:
                user_keys_dict[urn.URN(urn=user['urn']).getName()] = user['keys']
    if not user_keys_dict:
        return self.errorResult(am3.AM_API.BAD_ARGS, "No user (with SSH key) provided")
    # Do not outlive the slice or the max lease.
    max_expiration = self.min_expire(creds, self.max_lease,
                                     ('geni_end_time' in options
                                      and options['geni_end_time']))
    for sliver in slivers:
        # Extend the lease and set to PROVISIONED
        expiration = min(sliver.endTime(), max_expiration)
        sliver.setEndTime(expiration)
        sliver.setExpiration(expiration)
        sliver.setAllocationState(STATE_GENI_PROVISIONED)
    # Configure user and ssh keys on nodes (dockercontainer)
    for sliver in slivers:
        if sliver.operationalState() == OPSTATE_GENI_CONFIGURING:
            # Already being configured; skip.
            continue
        sliver.setOperationalState(OPSTATE_GENI_CONFIGURING)
        # pre-provision should be fast, so we don't do it on a separate thread
        if sliver.resource().is_proxy:
            # The proxy gets every user's keys merged under one fixed account.
            allkeys = []
            for userurn, keylist in user_keys_dict.items():
                allkeys.extend(keylist)
            new_user_keys_dict = {FIXED_PROXY_USER: allkeys}
            sliver.resource().preprovision(new_user_keys_dict)
        else:
            sliver.resource().preprovision(user_keys_dict)
        # provision might be slow, so we do it on a separate thread
        threading.Thread(target=self.provision_install_execute_sliver,
                         args=[the_slice, sliver]).start()
    self.dumpState()
    result = dict(geni_rspec=self.manifest_rspec(the_slice.urn, provision=True),
                  geni_slivers=[s.status() for s in slivers])
    return self.successResult(result)
def GetVersion(self, options):
    '''Specify version information about this AM. That could
    include API version information, RSpec format and version
    information, etc. Return a dict.'''
    self.logger.info("Called GetVersion")
    # The only supported RSpec format is GENI v3; the request and
    # advertisement entries differ only in their schema URL.
    common = dict(type="GENI",
                  version="3",
                  namespace="http://www.geni.net/resources/rspec/3",
                  extensions=[])
    request_versions = [dict(common,
                             schema="http://www.geni.net/resources/rspec/3/request.xsd")]
    ad_versions = [dict(common,
                        schema="http://www.geni.net/resources/rspec/3/ad.xsd")]
    versions = dict(
        geni_api=self._api_version,
        geni_api_versions={str(self._api_version): self.public_url},
        hrn=self._hrn,
        urn=self._my_urn,
        geni_am_type='gcf',
        geni_am_code=GCF_VERSION,
        geni_request_rspec_versions=request_versions,
        geni_ad_rspec_versions=ad_versions,
        geni_credential_types=[dict(geni_type=Credential.SFA_CREDENTIAL_TYPE,
                                    geni_version="3")])
    result = self.successResult(versions)
    # Add the top-level 'geni_api' per the AM API spec.
    result['geni_api'] = versions['geni_api']
    return result
def PerformOperationalAction(self, urns, credentials, action, options):
    """Perform the specified operational action (geni_start, geni_restart,
    geni_stop, geni_update_users or geni_reload) on the slivers
    identified by urns.
    """
    self.logger.info('PerformOperationalAction(%r)' % (urns))
    self.expire_slivers()
    the_slice, slivers = self.decode_urns(urns)
    # Note this list of privileges is really the name of an operation
    # from the privilege_table in sfa/trust/rights.py
    # Credentials will specify a list of privileges, each of which
    # confers the right to perform a list of operations.
    # EG the 'info' privilege in a credential allows the operations
    # listslices, listnodes, policy
    privileges = (PERFORM_ACTION_PRIV,)
    _ = self.getVerifiedCredentials(the_slice.urn, credentials, options, privileges)
    # astates / ostates: the allocation and operational states a sliver
    # must currently be in for the requested action to apply to it.
    astates = []
    ostates = []
    if action == 'geni_start':
        astates = [STATE_GENI_PROVISIONED]
        ostates = [OPSTATE_GENI_NOT_READY, OPSTATE_GENI_READY, OPSTATE_GENI_CONFIGURING]
    elif action == 'geni_restart':
        astates = [STATE_GENI_PROVISIONED]
        ostates = [OPSTATE_GENI_READY]
    elif action == 'geni_stop':
        astates = [STATE_GENI_PROVISIONED]
        ostates = [OPSTATE_GENI_READY]
    elif action == 'geni_update_users':
        astates = [STATE_GENI_PROVISIONED]
        ostates = [OPSTATE_GENI_READY]
    elif action == 'geni_reload':
        astates = [STATE_GENI_PROVISIONED]
        ostates = [OPSTATE_GENI_READY]
    else:
        msg = "Unsupported: action %s is not supported" % (action)
        raise ApiErrorException(am3.AM_API.UNSUPPORTED, msg)
    # Handle best effort. Look ahead to see if the operation
    # can be done. If the client did not specify best effort and
    # any resources are in the wrong state, stop and return an error.
    # But if the client specified best effort, trundle on and
    # do the best you can do.
    # A place to store errors on a per-sliver basis:
    # {sliverURN --> "error", sliverURN --> "error", etc.}
    errors = collections.defaultdict(str)
    for sliver in slivers:
        # ensure that the slivers are provisioned
        if (sliver.allocationState() not in astates
                or sliver.operationalState() not in ostates):
            msg = "%d: Sliver %s is not in the right state for action %s (current state = %s %s)."
            msg = msg % (am3.AM_API.UNSUPPORTED, sliver.urn(), action, sliver.allocationState(), sliver.operationalState())
            errors[sliver.urn()] = msg
    best_effort = False
    if 'geni_best_effort' in options:
        best_effort = bool(options['geni_best_effort'])
    if not best_effort and errors:
        raise ApiErrorException(am3.AM_API.UNSUPPORTED,
                                "\n".join(errors.values()))

    def thread_restart(sliver):
        # Restart the container on a background thread, then wait for it
        # to come back up before marking the sliver ready again.
        ret = sliver.resource().restart()
        if not ret:
            sliver.setOperationalState(OPSTATE_GENI_FAILED)
            return
        # now wait until container is up again
        if sliver.resource().waitForSshConnection() is not True:
            sliver.setOperationalState(OPSTATE_GENI_FAILED)
            sliver.resource().deprovision()
            return
        sliver.setOperationalState(OPSTATE_GENI_READY)
        self.dumpState()

    # Perform the state changes:
    for sliver in slivers:
        if (action == 'geni_start'):
            if (sliver.allocationState() in astates
                    and sliver.operationalState() in ostates):
                # geni_start is accepted but is a no-op here; presumably
                # the containers are already running after Provision --
                # TODO confirm.
                pass
        elif (action == 'geni_reload'):
            if (sliver.allocationState() in astates
                    and sliver.operationalState() in ostates):
                # Re-run the provision/install/execute sequence on a
                # background thread.
                sliver.setOperationalState(OPSTATE_GENI_CONFIGURING)
                threading.Thread(target=self.provision_install_execute_sliver,
                                 args=[the_slice, sliver]).start()
        elif (action == 'geni_restart'):
            if (sliver.allocationState() in astates
                    and sliver.operationalState() in ostates):
                sliver.setOperationalState(OPSTATE_GENI_CONFIGURING)
                threading.Thread(target=thread_restart, args=[sliver]).start()
        elif (action == 'geni_stop'):
            if (sliver.allocationState() in astates
                    and sliver.operationalState() in ostates):
                try:
                    # not perfect: deprovision also prevents reprovisioning
                    sliver.resource().deprovision()
                except:
                    # ignore errors when deprovisioning
                    pass
                sliver.setOperationalState(OPSTATE_GENI_NOT_READY)
        elif (action == 'geni_update_users'):
            # Rebuild the user -> SSH-keys mapping from the options.
            user_keys_dict = dict()
            if 'geni_users' in options:
                for user in options['geni_users']:
                    if 'keys' in user and len(user['keys'])>0:
                        user_keys_dict[urn.URN(urn=user['urn']).getName()] = user['keys']
            if sliver.resource().is_proxy:
                # On the proxy, all users' keys are merged under the single
                # fixed proxy account and appended to the existing keys.
                allkeys = sliver.resource().user_keys_dict[FIXED_PROXY_USER]
                if allkeys is None:
                    self.logger.warn('geni_update_users allkeys init: Failed to find existing user keys.')
                    allkeys = []
                else:
                    self.logger.info('geni_update_users allkeys init: Found %d existing user keys.' % len(allkeys))
                for userurn, keylist in user_keys_dict.items():
                    allkeys.extend(keylist)
                new_user_keys_dict = {FIXED_PROXY_USER: allkeys}
                self.logger.info('Updating proxy sliver keys %d' % len(allkeys))
                sliver.resource().updateUser(new_user_keys_dict, force=True)
            else:
                self.logger.info('Updating sliver keys %d' % len(user_keys_dict))
                sliver.resource().updateUser(user_keys_dict)
        else:
            # This should have been caught above
            msg = "Unsupported: action %s is not supported" % (action)
            raise ApiErrorException(am3.AM_API.UNSUPPORTED, msg)
    self.dumpState()
    # Return per-sliver status; any per-sliver error recorded above is
    # included (relevant in best-effort mode).
    return self.successResult([s.status(errors[s.urn()])
                               for s in slivers])
def Describe(self, urns, credentials, options):
    """Generate a manifest RSpec for the given resources.
    """
    self.logger.info('Describe(%r)' % (urns))
    self.expire_slivers()
    # Per the APIv3 spec, a slice with nothing local must yield an empty
    # manifest rather than an error.
    try:
        the_slice, slivers = self.decode_urns(urns)
    except ApiErrorException as ae:
        if ae.code != am3.AM_API.SEARCH_FAILED or "Unknown slice" not in ae.output:
            raise ae
        # Unknown slice: describe it as empty.
        slivers = []
        the_slice = Slice(urns[0])
    self.getVerifiedCredentials(the_slice.urn, credentials, options, (SLIVERSTATUSPRIV,))
    # geni_rspec_version (with type and version fields) is mandatory.
    if 'geni_rspec_version' not in options:
        self.logger.error('No geni_rspec_version supplied to Describe.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version was not supplied.')
    version_option = options['geni_rspec_version']
    if 'type' not in version_option:
        self.logger.error('Describe: geni_rspec_version does not contain a type field.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version does not have a type field.')
    if 'version' not in version_option:
        self.logger.error('Describe: geni_rspec_version does not contain a version field.')
        return self.errorResult(am3.AM_API.BAD_ARGS,
                                'Bad Arguments: option geni_rspec_version does not have a version field.')
    # Only GENI v3 manifests are supported.
    requested_type = version_option['type']
    if isinstance(requested_type, basestring):
        requested_type = requested_type.lower().strip()
    requested_version = version_option['version']
    if requested_type != 'geni':
        self.logger.error('Describe: Unknown RSpec type %s requested', requested_type)
        return self.errorResult(am3.AM_API.BAD_VERSION,
                                'Bad Version: requested RSpec type %s is not a valid option.' % (requested_type))
    if requested_version != '3':
        self.logger.error('Describe: Unknown RSpec version %s requested', requested_version)
        return self.errorResult(am3.AM_API.BAD_VERSION,
                                'Bad Version: requested RSpec version %s is not a valid option.' % (requested_version))
    self.logger.info("Describe requested RSpec %s (%s)", requested_type, requested_version)
    manifest = self.manifest_rspec(the_slice.getURN(), provision=True)
    self.logger.debug("Result is now \"%s\"", manifest)
    # Optionally compress the manifest
    if 'geni_compressed' in options and options['geni_compressed']:
        try:
            manifest = base64.b64encode(zlib.compress(manifest))
        except Exception as exc:
            self.logger.error("Error compressing and encoding resource list: %s", traceback.format_exc())
            raise Exception("Server error compressing resource list", exc)
    return self.successResult(dict(geni_rspec=manifest,
                                   geni_urn=the_slice.urn,
                                   geni_slivers=[s.status() for s in slivers]))
def Delete(self, urns, credentials, options):
    """Stop and completely delete the named slivers and/or slice."""
    self.logger.info('Delete(%r)' % (urns))
    self.expire_slivers()
    the_slice, slivers = self.decode_urns(urns)
    privileges = (DELETESLIVERPRIV,)
    self.getVerifiedCredentials(the_slice.urn, credentials, options, privileges)
    # Grab the user_urn
    user_urn = gid.GID(string=options['geni_true_caller_cert']).get_urn()
    # If we get here, the credentials give the caller
    # all needed privileges to act on the given target.
    # A shutdown slice may not be deleted.
    if the_slice.isShutdown():
        self.logger.info("Slice %s not deleted because it is shutdown",
                         the_slice.urn)
        return self.errorResult(am3.AM_API.UNAVAILABLE,
                                ("Unavailable: Slice %s is unavailable."
                                 % (the_slice.urn)))
    resources = [sliver.resource() for sliver in slivers]
    # Remove the resources from both the per-slice and per-user views.
    self._agg.deallocate(the_slice.urn, resources)
    self._agg.deallocate(user_urn, resources)
    delete_ev = threading.Event()

    def thread_delete(slivers):
        # Deleting containers can be slow, so it runs on a background
        # thread; delete_ev is set once the slivers themselves are gone.
        for sliver in slivers:
            slyce = sliver.slice()
            slyce.delete_sliver(sliver)
        delete_ev.set()
        # If slice is now empty, delete it.
        if not slyce.slivers():
            try:
                for i in self._slices[slyce.urn].images_to_delete:
                    # NOTE(review): `self.DockerManager` looks suspicious --
                    # elsewhere a `dockermanager` instance (local or Pyro
                    # proxy) is used; confirm this attribute exists, since
                    # the bare except below would hide an AttributeError.
                    self.DockerManager.deleteImage(slyce.urn+"::"+i)
            except:
                pass
            self.logger.debug("Deleting empty slice %r", slyce.urn)
            del self._slices[slyce.urn]
        self.dumpState()

    threading.Thread(target=thread_delete, args=[slivers]).start()
    # Wait, unless it takes too long (0.5 seconds)
    delete_ev.wait(timeout=0.5)
    return self.successResult([s.status() for s in slivers])
def Status(self, urns, credentials, options):
    '''Report as much as is known about the status of the resources
    in the sliver. The AM may not know.
    Return a dict of sliver urn, status, and a list of dicts resource
    statuses.'''
    self.logger.info('Status(%r)' % (urns))
    self.expire_slivers()
    the_slice, slivers = self.decode_urns(urns)
    privileges = (SLIVERSTATUSPRIV,)
    self.getVerifiedCredentials(the_slice.urn, credentials, options, privileges)
    # Each sliver reports its own status dict; any resource error is
    # passed along so it shows up as the sliver's geni_error.
    # (A previous version also hand-built a second geni_slivers list here
    # that was never used -- and which hard-coded geni_error='' -- that
    # dead code has been removed.)
    result = dict(geni_urn=the_slice.urn,
                  geni_slivers=[s.status(s.resource().error) for s in slivers])
    return self.successResult(result)
def Renew(self, urns, credentials, expiration_time, options):
'''Renew the local sliver that is part of the named Slice
until the given expiration time (in UTC with a TZ per RFC3339).
Requires at least one credential that is valid until then.
Return False on any error, True on success.'''
out = super(DockerAggregateManager,self).Renew(urns, credentials, expiration_time, options)
self.dumpState()
return out
# See https://www.protogeni.net/trac/protogeni/wiki/RspecAdOpState
    def advert_header(self):
        """Build the GENI v3 advertisement RSpec header element.

        Returns an lxml ``rspec`` element (type="advertisement") carrying the
        operational-state extension that declares the ``dockercontainer``
        sliver type and its geni_notready / geni_ready state machine.
        """
        # Schema locations advertised in xsi:schemaLocation (pairs of
        # namespace URI + .xsd location).
        schema_locs = ["http://www.geni.net/resources/rspec/3",
                       "http://www.geni.net/resources/rspec/3/ad.xsd",
                       "http://www.geni.net/resources/rspec/ext/opstate/1",
                       "http://www.geni.net/resources/rspec/ext/opstate/1/ad.xsd"]
        adv_header = etree.Element("rspec", nsmap={None : "http://www.geni.net/resources/rspec/3", "xsi" : "http://www.w3.org/2001/XMLSchema-instance", "ns3" : "http://www.protogeni.net/resources/rspec/ext/emulab/1"}, attrib={"{http://www.w3.org/2001/XMLSchema-instance}schemaLocation" : ' '.join(schema_locs)})
        adv_header.set("type", "advertisement")
        # Operational-state subtree: declares what states/actions the
        # dockercontainer sliver type supports.
        rspec_opstate = etree.SubElement(adv_header, "rspec_opstate", nsmap={None : "http://www.geni.net/resources/rspec/ext/opstate/1"})
        rspec_opstate.set("aggregate_manager_id", self._my_urn)
        rspec_opstate.set("start", "geni_notready")
        etree.SubElement(rspec_opstate, "sliver_type").set("name", "dockercontainer")
        # State: geni_notready -- geni_start moves the node to geni_ready.
        state = etree.SubElement(rspec_opstate, "state")
        state.set("name", "geni_notready")
        action = etree.SubElement(state, "action")
        action.set("name", "geni_start")
        action.set("next", "geni_ready")
        etree.SubElement(action, "description").text ="Transition the node to a ready state."
        etree.SubElement(state, "description").text = "DockerContainers are immediately ready once started."
        # State: geni_ready -- supports geni_restart (stays ready) and
        # geni_stop (back to geni_notready).
        state=etree.SubElement(rspec_opstate, "state")
        state.set("name", "geni_ready")
        etree.SubElement(state, "description").text = "DockerContainer node is up and ready to use."
        action = etree.SubElement(state, "action")
        action.set("name", "geni_restart")
        action.set("next", "geni_ready")
        etree.SubElement(action, "description").text = "Reboot the node"
        action = etree.SubElement(state, "action")
        action.set("name", "geni_stop")
        action.set("next", "geni_notready")
        etree.SubElement(action, "description").text = "Power down or stop the node."
        return adv_header
    def manifest_rspec(self, slice_urn, provision=False):
        """Build the manifest RSpec for a slice from its stored request RSpec.

        Re-parses the request RSpec, flips it to type="manifest", annotates
        each execute service with log-file locations, and stamps component /
        sliver ids onto each node that belongs to this AM. When ``provision``
        is true, authentication/proxy service elements are added as well.

        Returns the serialized XML as a UTF-8 byte string.
        """
        rspec = etree.parse(StringIO(self._slices[slice_urn].request_rspec))
        rspec.getroot().set("type", "manifest")
        # Default namespace of the request document (GENI rspec namespace).
        ns=rspec.getroot().nsmap.get(None)
        services = rspec.getroot().xpath("x:node/x:services", namespaces={'x':ns})
        # For every <execute> service, advertise where its wrapper script,
        # stdout log, and exit-status file live inside the container.
        i_exec = 0
        for s in services:
            executes= s.xpath("x:execute", namespaces={'x':ns})
            if len(executes) > 0:
                for e in executes:
                    tmp = etree.Element("{http://www.fed4fire.eu/docker_am}execute_logs")
                    tmp.set("log","/tmp/startup-"+str(i_exec)+".txt")
                    tmp.set("status","/tmp/startup-"+str(i_exec)+".status")
                    tmp.set("command","/tmp/startup-"+str(i_exec)+".sh")
                    # e.getparent().remove(e)
                    s.append(tmp)
                    i_exec+=1
        # Match each <node> to its sliver by client_id + component_manager_id
        # and stamp the resolved component/sliver URNs onto it.
        for node in rspec.getroot().getchildren():
            for s in self._slices[slice_urn].slivers():
                if node.get("client_id") == s.resource().external_id and node.get("component_manager_id") == self._my_urn:
                    node.set("component_id", s.resource().urn(self._urn_authority))
                    node.set("sliver_id", s.urn())
                    if provision:
                        # Find (or create) the node's <services> element and
                        # append auth + proxy service entries for the resource.
                        services = None
                        for c in node.getchildren():
                            if c.tag == "{"+ns+"}"+"services":
                                services = c
                                break
                        if services is None:
                            services = etree.Element("services")
                        services.extend(s.resource().manifestAuth())
                        s.resource().addManifestProxyServiceElements(services)
                        node.append(services)
        return etree.tostring(rspec, pretty_print=True, xml_declaration=True, encoding='utf-8')
def resources(self, available=None):
"""Get the list of managed resources. If available is not None,
it is interpreted as boolean and only resources whose availability
matches will be included in the returned list.
"""
result = list(self._agg.catalog())
if available is not None:
result = [r for r in result if r.available is available]
return result
def expire_slivers(self):
"""Look for expired slivers and clean them up. Ultimately this
should be run by a daemon, but until then, it is called at the
beginning of all methods.
"""
if EXPIRE_LOCK.locked():
return None
EXPIRE_LOCK.acquire()
expired = list()
now = datetime.datetime.utcnow()
for slyce in self._slices.values():
for sliver in slyce.slivers():
self.logger.debug('Checking sliver %s (expiration = %r) at %r',
sliver.urn(), sliver.expiration(), now)
if sliver.expiration() < now:
self.logger.debug('Expring sliver %s (expiration = %r) at %r',
sliver.urn(), sliver.expiration(), now)
expired.append(sliver)
dump=False
if len(expired)>0:
self.logger.info('Expiring %d slivers', len(expired))
dump=True
for sliver in expired:
slyce = sliver.slice()
slyce.delete_sliver(sliver)
# If slice is now empty, delete it.
if len(slyce.slivers()) == 0:
self.logger.debug("Deleting empty slice %r", slyce.urn)
try:
for i in self._slices[slyce.urn].images_to_delete:
self.DockerManager.deleteImage(slyce.urn+"::"+i)
except:
pass
del self._slices[slyce.urn]
if dump: #If something has changed, save data
self.dumpState()
EXPIRE_LOCK.release()
def dumpState(self):
DUMP_LOCK.acquire()
try:
TMP_STATE_FILENAME = STATE_FILENAME+".tmp"
open(TMP_STATE_FILENAME, 'w').close()
s = open(TMP_STATE_FILENAME, "wb")
p = pickle.Pickler(s, pickle.HIGHEST_PROTOCOL)
p.dump(self._agg)
p.dump(self._slices)
p.dump(self.proxy_dockermaster)
p.dump(self.public_url)
p.dump(self.terms_and_conditions_site_enabled)
p.dump(self.disallow_users_if_terms_and_conditions_not_accepted)
s.close()
copyfile(TMP_STATE_FILENAME, STATE_FILENAME)
except RuntimeError:
print 'error in DumpState'
pass
DUMP_LOCK.release()
    def expireSliversDaemon(self):
        """Background loop: wake every 5 minutes and reap expired slivers.

        Intended to run in a daemon thread; never returns.
        """
        while True:
            time.sleep(300)
            self.expire_slivers()
class Slice(am3.Slice):
    """AM slice that additionally remembers its original request RSpec and
    any Docker images queued for deletion when the slice is torn down."""

    def __init__(self, urn):
        super(Slice, self).__init__(urn)
        # Raw request RSpec text, recorded at allocation time (set later).
        self.request_rspec = None
        # Image identifiers to remove when the slice becomes empty.
        self.images_to_delete = []
|
|
from __future__ import absolute_import
import time
from sentry.utils.iterators import chunked
from sentry.utils.redis import load_script
# Handle to the Lua script implementing every similarity-index command
# server-side in Redis; invoked below as index(client, keys, args).
index = load_script('similarity/index.lua')
def band(n, value):
    """Split ``value`` into ``n`` equal-sized bands (lists of items).

    ``len(value)`` must be an exact multiple of ``n``.
    """
    assert len(value) % n == 0
    # BUG FIX: use floor division so the chunk size stays an int on
    # Python 3 -- plain ``/`` yields a float there and breaks ``chunked``.
    return list(chunked(value, len(value) // n))
class MinHashIndex(object):
    """Client for a Redis-backed MinHash similarity index.

    All operations are implemented by a server-side Lua script (``index``);
    each public method assembles the script's argument list and routes the
    call to the Redis node that owns ``scope``.

    Refactored to remove the seven-way duplication of argument-prefix and
    band-encoding construction; the wire format is unchanged.
    """

    def __init__(self, cluster, namespace, signature_builder, bands, interval, retention):
        self.cluster = cluster  # Redis cluster; requests are routed by scope
        self.namespace = namespace  # key namespace shared by all commands
        self.signature_builder = signature_builder  # features -> signature
        self.bands = bands  # number of LSH bands per signature
        self.interval = interval  # time bucketing interval
        self.retention = retention  # number of intervals retained

    def _base_arguments(self, command, timestamp, scope):
        """Build the argument prefix shared by every script command."""
        if timestamp is None:
            timestamp = int(time.time())
        return [
            command,
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
        ]

    def _banded_signature(self, features):
        """Encode a signature as one comma-joined string per band."""
        return [
            ','.join(map('{}'.format, b))
            for b in band(self.bands, self.signature_builder(features))
        ]

    def _execute(self, scope, arguments):
        """Run the Lua script on the node owning ``scope``."""
        return index(
            self.cluster.get_local_client_for_key(scope),
            [],
            arguments,
        )

    def classify(self, scope, items, timestamp=None):
        """Score ``items`` ((idx, features) pairs) against the index."""
        arguments = self._base_arguments('CLASSIFY', timestamp, scope)
        for idx, features in items:
            arguments.append(idx)
            arguments.extend(self._banded_signature(features))
        return [
            [(item, float(score)) for item, score in result]
            for result in self._execute(scope, arguments)
        ]

    def compare(self, scope, key, indices, timestamp=None):
        """Compare the recorded ``key`` against other keys in ``indices``."""
        arguments = self._base_arguments('COMPARE', timestamp, scope)
        arguments.append(key)
        arguments.extend(indices)
        return [
            [(item, float(score)) for item, score in result]
            for result in self._execute(scope, arguments)
        ]

    def record(self, scope, key, items, timestamp=None):
        """Record signatures for ``key`` from (idx, features) pairs."""
        if not items:
            return  # nothing to do
        arguments = self._base_arguments('RECORD', timestamp, scope)
        arguments.append(key)
        for idx, features in items:
            arguments.append(idx)
            arguments.extend(self._banded_signature(features))
        return self._execute(scope, arguments)

    def merge(self, scope, destination, items, timestamp=None):
        """Merge (idx, source_key) pairs into ``destination``."""
        arguments = self._base_arguments('MERGE', timestamp, scope)
        arguments.append(destination)
        for idx, source in items:
            arguments.extend([idx, source])
        return self._execute(scope, arguments)

    def delete(self, scope, items, timestamp=None):
        """Delete (idx, key) pairs from the index."""
        arguments = self._base_arguments('DELETE', timestamp, scope)
        for idx, key in items:
            arguments.extend([idx, key])
        return self._execute(scope, arguments)

    def export(self, scope, items, timestamp=None):
        """Export raw index data for (idx, key) pairs."""
        arguments = self._base_arguments('EXPORT', timestamp, scope)
        for idx, key in items:
            arguments.extend([idx, key])
        return self._execute(scope, arguments)

    def import_(self, scope, items, timestamp=None):
        """Import raw index data from (idx, key, data) triples."""
        arguments = self._base_arguments('IMPORT', timestamp, scope)
        for idx, key, data in items:
            arguments.extend([idx, key, data])
        return self._execute(scope, arguments)
|
|
#! /usr/bin/python3
# * database.py * #
# * Nicholas DiBari * #
# * --------------------------------------------- * #
# * Provides user interface for Get Quote * #
# * DataBase Management * #
# * --------------------------------------------- * #
import argparse
import sys
import settings
from utils import DBClient
def create_arg_parser():
    """
    Create parser for command line arguments
    - Return an ArgumentParser object that will determine what function to run
    """
    parser = argparse.ArgumentParser(
        prog='./database.py',
        usage='%(prog)s [-i --interactive] [-p --print] [-s --search <author>] [-d --delete] [--dump <output file>]',
        description='The Easy to use Database Manager'
    )
    # Arguments are registered in the same order as before so --help output
    # is unchanged.
    parser.add_argument('-i', '--interactive', dest='interactive_mode',
                        action='store_true', default=False,
                        help='Open your database interface')
    parser.add_argument('-p', '--print', dest='print_db',
                        action='store_true', default=False,
                        help='Print all quotes from your database')
    parser.add_argument('-s', '--search', dest='author',
                        action='store', type=str, nargs='*', default='',
                        help='Search your database for quotes matching author')
    parser.add_argument('-d', '--delete', dest='delete',
                        action='store_true', default=False,
                        help='Run delete interface to remove quotes from your database')
    parser.add_argument('--dump', dest='output_file',
                        action='store', type=str, default='',
                        help='Save contents of your database to a text file.')
    return parser
def print_quotes(db_client):
    """
    Print all quotes from the database, or a notice when it is empty.
    """
    all_quotes = db_client.get_all_quotes()
    if not all_quotes:
        print('Your database is empty!')
        return
    print('ID | Author | Quote | Created At |')
    for entry in all_quotes:
        print(entry)
        print('-' * 45)
def delete_quotes(db_client):
    """
    Delete a specific quote from the database
    TODO: Add option to send author name to function as kwarg
    """
    all_quotes = db_client.get_all_quotes()
    print('ID | Author | Quote')
    for entry in all_quotes:
        print(entry)
    selected = input('Please select the number of the quote to delete: ')
    confirmation = input('Are you sure you want to delete this quote (y/n): ')
    # Only 'y'/'Y' confirms; anything else aborts silently.
    if confirmation.lower() == 'y':
        db_client.delete_quote_from_database(selected)
        print('Deleted quote {}'.format(selected))
def search_quotes(db_client, to_search=None):
    """
    Search database for all quotes from an author and write them to the console
    :param db_client: (DBClient) Connection to database
    :param to_search: Name of author to search database for matching quotes
    """
    # When an author was supplied (e.g. from the command line), run exactly
    # one search and return instead of prompting interactively.
    single_shot = bool(to_search)
    while True:
        if not to_search:
            to_search = input('Please enter an author to search for: ')
        matches = db_client.get_quotes_for_author(to_search)
        if matches:
            print('Found the following quotes by {}'.format(to_search))
            print('-' * 45)
            for entry in matches:
                print(entry)
                print('-' * 45)
        else:
            print('Sorry, did not find {} in the database.'.format(to_search))
        if single_shot:
            break
        if input('Would you like you search again? (y/n): ').lower() == 'n':
            break
        to_search = None
def dump_quotes(db_client, file_name=None):
    """
    Write all quotes in the database to a text file
    :param db_client: (DBClient) Connection to database
    :param file_name: (str) Name of file to write data
    """
    if not file_name:
        file_name = input('Please enter the filename to save the quotes to: ')
    # Guarantee a .txt extension.
    if not file_name.endswith('.txt'):
        file_name += '.txt'
    with open(file_name, 'w') as out:
        for entry in db_client.get_all_quotes():
            out.write('{0}: {1}\n'.format(entry.author, entry.quote))
            out.write('-' * 90 + '\n')
    print('Done! Your quotes can be found in {}'.format(file_name))
def interactive_mode(db_client):
    """
    Loop to run the functionality in a shell-like mode
    """
    while True:
        print('Please enter a choice:')
        print('1. Print all Quotes')
        print('2. Delete a Quote')
        print('3. Search for author')
        print('4. Dump Database to text file')
        print('5. [EXIT]')
        raw = input('> ')
        try:
            selection = int(raw)
        except ValueError:
            print('Enter in a number silly!')
            continue
        if selection == 5:
            # [EXIT]
            break
        if selection == 1:
            print_quotes(db_client)
        elif selection == 2:
            delete_quotes(db_client)
        elif selection == 3:
            search_quotes(db_client)
        elif selection == 4:
            dump_quotes(db_client)
        else:
            # Any integer outside 1..5.
            print('Sorry that is not a valid choice. Try again')
def main():
    """
    Driver function for script
    Determines to run interactive shell or to call specific function using
    command line arguments
    """
    client = DBClient(settings.DB_NAME)
    arg_parser = create_arg_parser()
    options = arg_parser.parse_args(sys.argv[1:])
    # Mutually exclusive modes, checked in priority order; with no flags,
    # show the usage text.
    if options.interactive_mode:
        interactive_mode(client)
    elif options.print_db:
        print_quotes(client)
    elif options.author:
        search_quotes(client, to_search=' '.join(options.author))
    elif options.delete:
        delete_quotes(client)
    elif options.output_file:
        dump_quotes(client, file_name=options.output_file)
    else:
        arg_parser.print_help()
    client.close_connection()


if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -
#
# Copyright (c) 2008, 2009 Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""
curl transport
"""
import re
import io
import six
import sys
import restclient
from restclient.errors import TransportError
from restclient.transport.base import *
from restclient.utils import to_bytestring, iri2uri
try:
import pycurl
except ImportError:
pycurl = None
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.items()])
def _get_pycurl_errcode(symbol, default):
    """
    Returns the numerical error code for a symbol defined by pycurl.

    Different pycurl implementations define different symbols for error
    codes. Old versions never define some symbols (wether they can return the
    corresponding error code or not). The following addresses the problem by
    defining the symbols we care about. Note: this allows to define symbols
    for errors that older versions will never return, which is fine.
    """
    # Look the symbol up directly in the module dict; fall back to the
    # well-known numeric value when this pycurl build does not define it.
    return pycurl.__dict__.get(symbol, default)
# Resolve the libcurl error codes this module cares about, tolerating pycurl
# builds that do not export the symbolic names. Skipped entirely when pycurl
# is unavailable (CurlTransport refuses to construct in that case anyway).
if pycurl is not None:
    CURLE_COULDNT_CONNECT = _get_pycurl_errcode('E_COULDNT_CONNECT', 7)
    CURLE_COULDNT_RESOLVE_HOST = _get_pycurl_errcode('E_COULDNT_RESOLVE_HOST', 6)
    CURLE_COULDNT_RESOLVE_PROXY = _get_pycurl_errcode('E_COULDNT_RESOLVE_PROXY', 5)
    CURLE_GOT_NOTHING = _get_pycurl_errcode('E_GOT_NOTHING', 52)
    CURLE_PARTIAL_FILE = _get_pycurl_errcode('E_PARTIAL_FILE', 18)
    CURLE_SEND_ERROR = _get_pycurl_errcode('E_SEND_ERROR', 55)
    CURLE_SSL_CACERT = _get_pycurl_errcode('E_SSL_CACERT', 60)
    CURLE_SSL_CACERT_BADFILE = _get_pycurl_errcode('E_SSL_CACERT_BADFILE', 77)
class CurlTransport(HTTPTransportBase):
    """
    An HTTP transport that uses pycurl.

    Pycurl is recommended when you want fast access to http resources.
    We have added some basic management of authentication and proxies,
    but in case you want something specific you should use urllib2 or
    httplib2 http clients. Any patch is welcome though ;)

    Here is an example to use authentication with curl httpclient:

    .. code-block:: python

        httpclient = CurlTransport()
        httpclient.add_credentials("test", "test")

    .. seealso::

        `Pycurl <http://pycurl.sourceforge.net>`_
    """

    def __init__(self, timeout=None, proxy_infos=None):
        """ Curl transport constructor

        :param timeout: int, timeout of request
        :param proxy_infos: dict, infos to connect via proxy:

        .. code-block:: python

            {
                'proxy_user': 'XXXXXXX',
                'proxy_password': 'XXXXXXX',
                'proxy_host': 'proxy',
                'proxy_port': 8080,
            }

        """
        HTTPTransportBase.__init__(self, proxy_infos=proxy_infos)

        # path to certificate file; set by callers, passed to CURLOPT_CAINFO
        self.cabundle = None

        if pycurl is None:
            raise RuntimeError('Cannot find pycurl library')

        self.timeout = timeout

    def _parseHeaders(self, header_file):
        """Parse the raw response-header buffer written by pycurl.

        :param header_file: bytes file-like object containing the status
            line, header lines, and a trailing blank line.
        :return: dict mapping lowercased header name (bytes) -> value (bytes)
        :raise TransportError: if the blank terminator is missing or a
            header line cannot be split on ':'.
        """
        header_file.seek(0)

        # Remove the status line from the beginning of the input
        unused_http_status_line = header_file.readline()
        lines = [line.strip() for line in header_file]

        # and the blank line from the end
        empty_line = lines.pop()
        if empty_line:
            raise TransportError("No blank line at end")

        headers = {}
        # BUG FIX: was six.binary_type(':', 'utf-8'), which crashes on
        # Python 2 (str() takes a single argument); a bytes literal behaves
        # identically on Python 3 and works everywhere.
        separator = b':'
        for line in lines:
            if separator in line:
                try:
                    name, value = line.split(separator, 1)
                except ValueError:
                    raise TransportError(
                        "Malformed HTTP header line in response: %r" % (line,))

                value = value.strip()

                # HTTP headers are case-insensitive
                name = name.lower()
                headers[name] = value

        return headers

    def request(self, url, method='GET', body=None, headers=None):
        """Perform one HTTP request through pycurl.

        :param url: target IRI; converted to a byte-string URI before use.
        :param method: HTTP verb. GET/HEAD/POST/PUT map to dedicated curl
            options; anything else goes through CUSTOMREQUEST.
        :param body: string or file-like object (used for POST/PUT).
        :param headers: dict of request headers.
        :return: (HTTPResponse, body bytes) from :meth:`_make_response`.
        :raise TransportError: on curl errors other than a send error.
        """
        body = body or ""
        headers = headers or {}
        headers.setdefault('User-Agent',
                           "%s %s" % (USER_AGENT, pycurl.version,))

        # by default turn off default pragma
        headers.setdefault('Cache-control', 'max-age=0')
        headers.setdefault('Pragma', 'no-cache')

        if method == 'PUT':
            headers.setdefault('Expect', '100-continue')

        # encode url
        origin_url = to_bytestring(url)
        url = iri2uri(origin_url)

        c = pycurl.Curl()
        try:
            # set curl options
            if self.timeout is not None:
                c.setopt(pycurl.TIMEOUT, self.timeout)
            else:  # no timeout by default
                c.setopt(pycurl.TIMEOUT, 0)

            data = io.BytesIO()
            header = io.BytesIO()
            c.setopt(pycurl.WRITEFUNCTION, data.write)
            c.setopt(pycurl.HEADERFUNCTION, header.write)
            c.setopt(pycurl.URL, url)
            c.setopt(pycurl.FOLLOWLOCATION, 1)
            c.setopt(pycurl.MAXREDIRS, 5)
            # NOSIGNAL avoids SIGALRM-based DNS timeouts (thread safety).
            c.setopt(pycurl.NOSIGNAL, 1)

            if restclient.debuglevel > 0:
                c.setopt(pycurl.VERBOSE, 1)

            # automatic decompression
            c.setopt(pycurl.ENCODING, 'gzip,deflate')

            if self.cabundle:
                # BUG FIX: was ``celf.cabundle`` -- a NameError whenever a
                # CA bundle was actually configured.
                c.setopt(pycurl.CAINFO, self.cabundle)

            # set proxy
            if self.proxy_infos and self.proxy_infos.get('proxy_host', ''):
                c.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_ANY)
                c.setopt(pycurl.PROXY, self.proxy_infos.get('proxy_host'))

                proxy_port = self.proxy_infos.get('proxy_port', '')
                if proxy_port:
                    c.setopt(pycurl.PROXYPORT, str(proxy_port))

                user = self.proxy_infos.get('proxy_user', '')
                if user:
                    userpass = "%s:%s" % (user, self.proxy_infos.get('proxy_password', ''))
                    c.setopt(pycurl.PROXYUSERPWD, userpass)

            # authentication
            auth = self._get_credentials()
            user = auth.get('user', None)
            password = auth.get('password', None)
            if user is not None:
                c.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_ANY)
                userpass = user + ':'
                if password is not None:  # '' is a valid password
                    userpass += password
                c.setopt(pycurl.USERPWD, userpass)

            # set method
            if method == "GET":
                c.setopt(pycurl.HTTPGET, 1)
            elif method == "HEAD":
                c.setopt(pycurl.HTTPGET, 1)
                c.setopt(pycurl.NOBODY, 1)
            elif method == "POST":
                c.setopt(pycurl.POST, 1)
            elif method == "PUT":
                c.setopt(pycurl.UPLOAD, 1)
            else:
                c.setopt(pycurl.CUSTOMREQUEST, method)

            if method in ('POST', 'PUT'):
                if hasattr(body, 'read'):
                    # Streaming upload: trust the caller-provided length.
                    content_length = int(headers.pop('Content-Length',
                                                     0))
                    content = body
                else:
                    body = to_bytestring(body)
                    content = io.BytesIO(body)
                    if 'Content-Length' in headers:
                        del headers['Content-Length']
                    content_length = len(body)

                if method == 'POST':
                    c.setopt(pycurl.POSTFIELDSIZE, content_length)
                else:
                    c.setopt(pycurl.INFILESIZE, content_length)
                c.setopt(pycurl.READFUNCTION, content.read)

            if headers:
                # NOTE(review): the normalized dict is discarded here, so the
                # raw header names/values are what actually get sent.
                # Presumably this was meant to be
                # ``headers = _normalize_headers(headers)`` -- confirm before
                # changing, since that would lowercase all outgoing names.
                _normalize_headers(headers)
                c.setopt(pycurl.HTTPHEADER,
                         ["%s: %s" % pair for pair in sorted(headers.items())])

            try:
                c.perform()
            except pycurl.error as e:
                # Send errors are tolerated (the response may still be
                # readable); everything else is wrapped and re-raised.
                if e.args[0] != CURLE_SEND_ERROR:
                    if restclient.debuglevel > 0:
                        print(str(e), file=sys.stderr)
                    raise TransportError(e)

            response_headers = self._parseHeaders(header)
            code = c.getinfo(pycurl.RESPONSE_CODE)

            return self._make_response(final_url=url, origin_url=origin_url,
                                       status=code, headers=response_headers, body=data.getvalue())
        finally:
            c.close()

    def _make_response(self, final_url=None, origin_url=None, status=None,
                       headers=None, body=None):
        """Assemble the (HTTPResponse, body) pair returned by :meth:`request`.

        A Location header, when present, overrides ``final_url``.
        """
        infos = headers or {}
        final_url = infos.get('location', final_url)

        infos.update({
            'status': status,
            'final_url': final_url,
            'origin_url': origin_url
        })

        resp = HTTPResponse(infos)
        return resp, body
|
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import tempfile
import testtools
import time
from hashlib import md5
from mock import Mock, PropertyMock
from six.moves.queue import Queue, Empty as QueueEmptyError
from six import BytesIO
import swiftclient
import swiftclient.utils as utils
from swiftclient.client import Connection, ClientException
from swiftclient.service import SwiftService, SwiftError,\
SwiftUploadObject
import six
if six.PY2:
import __builtin__ as builtins
else:
import builtins
# Build a copy of the environment with every Swift ('ST_') / OpenStack
# ('OS_') variable blanked, so the tests below run isolated from any real
# credentials present in the developer's shell.
clean_os_environ = {}
environ_prefixes = ('ST_', 'OS_')
for key in os.environ:
    if any(key.startswith(m) for m in environ_prefixes):
        clean_os_environ[key] = ''
class TestSwiftPostObject(testtools.TestCase):
    """Construction rules for swiftclient.service.SwiftPostObject."""

    def setUp(self):
        super(TestSwiftPostObject, self).setUp()
        self.spo = swiftclient.service.SwiftPostObject

    def test_create(self):
        post_object = self.spo('obj_name')
        self.assertEqual(post_object.object_name, 'obj_name')
        self.assertEqual(post_object.options, None)

    def test_create_with_invalid_name(self):
        # Empty strings are not allowed as names.
        self.assertRaises(SwiftError, self.spo, '')
        # Names cannot be anything but strings.
        self.assertRaises(SwiftError, self.spo, 1)
class TestSwiftReader(testtools.TestCase):
    # Tests for swiftclient.service._SwiftReader, the download wrapper that
    # verifies content length and etag (md5) while the body is iterated.

    def setUp(self):
        super(TestSwiftReader, self).setUp()
        self.sr = swiftclient.service._SwiftReader
        # Concrete type of a hashlib md5 object, for isinstance checks below.
        self.md5_type = type(md5())

    def test_create(self):
        sr = self.sr('path', 'body', {})
        self.assertEqual(sr._path, 'path')
        self.assertEqual(sr._body, 'body')
        self.assertEqual(sr._content_length, None)
        self.assertEqual(sr._expected_etag, None)
        # Without large-object headers an md5 accumulator must exist.
        self.assertNotEqual(sr._actual_md5, None)
        self.assertTrue(isinstance(sr._actual_md5, self.md5_type))

    def test_create_with_large_object_headers(self):
        # md5 should not be initialized if large object headers are present
        sr = self.sr('path', 'body', {'x-object-manifest': 'test'})
        self.assertEqual(sr._path, 'path')
        self.assertEqual(sr._body, 'body')
        self.assertEqual(sr._content_length, None)
        self.assertEqual(sr._expected_etag, None)
        self.assertEqual(sr._actual_md5, None)

        sr = self.sr('path', 'body', {'x-static-large-object': 'test'})
        self.assertEqual(sr._path, 'path')
        self.assertEqual(sr._body, 'body')
        self.assertEqual(sr._content_length, None)
        self.assertEqual(sr._expected_etag, None)
        self.assertEqual(sr._actual_md5, None)

    def test_create_with_content_length(self):
        sr = self.sr('path', 'body', {'content-length': 5})
        self.assertEqual(sr._path, 'path')
        self.assertEqual(sr._body, 'body')
        self.assertEqual(sr._content_length, 5)
        self.assertEqual(sr._expected_etag, None)
        self.assertNotEqual(sr._actual_md5, None)
        self.assertTrue(isinstance(sr._actual_md5, self.md5_type))

        # Check Contentlength raises error if it isnt an integer
        self.assertRaises(SwiftError, self.sr, 'path', 'body',
                          {'content-length': 'notanint'})

    def test_iterator_usage(self):
        # Draining the iterator is what triggers the etag / length checks.
        def _consume(sr):
            for _ in sr:
                pass

        sr = self.sr('path', BytesIO(b'body'), {})
        _consume(sr)

        # Check error is raised if expected etag doesnt match calculated md5.
        # md5 for a SwiftReader that has done nothing is
        # d41d8cd98f00b204e9800998ecf8427e i.e md5 of nothing
        sr = self.sr('path', BytesIO(b'body'), {'etag': 'doesntmatch'})
        self.assertRaises(SwiftError, _consume, sr)

        # 841a2d... is md5(b'body'), so consuming must succeed here.
        sr = self.sr('path', BytesIO(b'body'),
                     {'etag': '841a2d689ad86bd1611447453c22c6fc'})
        _consume(sr)

        # Check error is raised if SwiftReader doesnt read the same length
        # as the content length it is created with
        sr = self.sr('path', BytesIO(b'body'), {'content-length': 5})
        self.assertRaises(SwiftError, _consume, sr)

        sr = self.sr('path', BytesIO(b'body'), {'content-length': 4})
        _consume(sr)

        # Check that the iterator generates expected length and etag values
        sr = self.sr('path', ['abc'.encode()] * 3, {})
        _consume(sr)
        self.assertEqual(sr._actual_read, 9)
        self.assertEqual(sr._actual_md5.hexdigest(),
                         '97ac82a5b825239e782d0339e2d7b910')
class TestServiceDelete(testtools.TestCase):
def setUp(self):
super(TestServiceDelete, self).setUp()
self.opts = {'leave_segments': False, 'yes_all': False}
self.exc = Exception('test_exc')
# Base response to be copied and updated to matched the expected
# response for each test
self.expected = {
'action': None, # Should be string in the form delete_XX
'container': 'test_c',
'object': 'test_o',
'attempts': 2,
'response_dict': {},
'success': None # Should be a bool
}
def _get_mock_connection(self, attempts=2):
m = Mock(spec=Connection)
type(m).attempts = PropertyMock(return_value=attempts)
return m
def _get_queue(self, q):
# Instead of blocking pull items straight from the queue.
# expects at least one item otherwise the test will fail.
try:
return q.get_nowait()
except QueueEmptyError:
self.fail('Expected item in queue but found none')
def _get_expected(self, update=None):
expected = self.expected.copy()
if update:
expected.update(update)
return expected
def _assertDictEqual(self, a, b, m=None):
# assertDictEqual is not available in py2.6 so use a shallow check
# instead
if hasattr(self, 'assertDictEqual'):
self.assertDictEqual(a, b, m)
else:
self.assertTrue(isinstance(a, dict))
self.assertTrue(isinstance(b, dict))
self.assertEqual(len(a), len(b), m)
for k, v in a.items():
self.assertTrue(k in b, m)
self.assertEqual(b[k], v, m)
def test_delete_segment(self):
mock_q = Queue()
mock_conn = self._get_mock_connection()
expected_r = self._get_expected({
'action': 'delete_segment',
'object': 'test_s',
'success': True,
})
r = SwiftService._delete_segment(mock_conn, 'test_c', 'test_s', mock_q)
mock_conn.delete_object.assert_called_once_with(
'test_c', 'test_s', response_dict={}
)
self._assertDictEqual(expected_r, r)
self._assertDictEqual(expected_r, self._get_queue(mock_q))
def test_delete_segment_exception(self):
mock_q = Queue()
mock_conn = self._get_mock_connection()
mock_conn.delete_object = Mock(side_effect=self.exc)
expected_r = self._get_expected({
'action': 'delete_segment',
'object': 'test_s',
'success': False,
'error': self.exc
})
r = SwiftService._delete_segment(mock_conn, 'test_c', 'test_s', mock_q)
mock_conn.delete_object.assert_called_once_with(
'test_c', 'test_s', response_dict={}
)
self._assertDictEqual(expected_r, r)
self._assertDictEqual(expected_r, self._get_queue(mock_q))
def test_delete_object(self):
mock_q = Queue()
mock_conn = self._get_mock_connection()
mock_conn.head_object = Mock(return_value={})
expected_r = self._get_expected({
'action': 'delete_object',
'success': True
})
s = SwiftService()
r = s._delete_object(mock_conn, 'test_c', 'test_o', self.opts, mock_q)
mock_conn.head_object.assert_called_once_with('test_c', 'test_o')
mock_conn.delete_object.assert_called_once_with(
'test_c', 'test_o', query_string=None, response_dict={}
)
self._assertDictEqual(expected_r, r)
def test_delete_object_exception(self):
mock_q = Queue()
mock_conn = self._get_mock_connection()
mock_conn.delete_object = Mock(side_effect=self.exc)
expected_r = self._get_expected({
'action': 'delete_object',
'success': False,
'error': self.exc
})
# _delete_object doesnt populate attempts or response dict if it hits
# an error. This may not be the correct behaviour.
del expected_r['response_dict'], expected_r['attempts']
s = SwiftService()
r = s._delete_object(mock_conn, 'test_c', 'test_o', self.opts, mock_q)
mock_conn.head_object.assert_called_once_with('test_c', 'test_o')
mock_conn.delete_object.assert_called_once_with(
'test_c', 'test_o', query_string=None, response_dict={}
)
self._assertDictEqual(expected_r, r)
def test_delete_object_slo_support(self):
# If SLO headers are present the delete call should include an
# additional query string to cause the right delete server side
mock_q = Queue()
mock_conn = self._get_mock_connection()
mock_conn.head_object = Mock(
return_value={'x-static-large-object': True}
)
expected_r = self._get_expected({
'action': 'delete_object',
'success': True
})
s = SwiftService()
r = s._delete_object(mock_conn, 'test_c', 'test_o', self.opts, mock_q)
mock_conn.head_object.assert_called_once_with('test_c', 'test_o')
mock_conn.delete_object.assert_called_once_with(
'test_c', 'test_o',
query_string='multipart-manifest=delete',
response_dict={}
)
self._assertDictEqual(expected_r, r)
def test_delete_object_dlo_support(self):
mock_q = Queue()
s = SwiftService()
mock_conn = self._get_mock_connection()
expected_r = self._get_expected({
'action': 'delete_object',
'success': True,
'dlo_segments_deleted': True
})
# A DLO object is determined in _delete_object by heading the object
# and checking for the existence of a x-object-manifest header.
# Mock that here.
mock_conn.head_object = Mock(
return_value={'x-object-manifest': 'manifest_c/manifest_p'}
)
mock_conn.get_container = Mock(
side_effect=[(None, [{'name': 'test_seg_1'},
{'name': 'test_seg_2'}]),
(None, {})]
)
def get_mock_list_conn(options):
return mock_conn
with mock.patch('swiftclient.service.get_conn', get_mock_list_conn):
r = s._delete_object(
mock_conn, 'test_c', 'test_o', self.opts, mock_q
)
self._assertDictEqual(expected_r, r)
expected = [
mock.call('test_c', 'test_o', query_string=None, response_dict={}),
mock.call('manifest_c', 'test_seg_1', response_dict={}),
mock.call('manifest_c', 'test_seg_2', response_dict={})]
mock_conn.delete_object.assert_has_calls(expected, any_order=True)
def test_delete_empty_container(self):
mock_conn = self._get_mock_connection()
expected_r = self._get_expected({
'action': 'delete_container',
'success': True,
'object': None
})
r = SwiftService._delete_empty_container(mock_conn, 'test_c')
mock_conn.delete_container.assert_called_once_with(
'test_c', response_dict={}
)
self._assertDictEqual(expected_r, r)
    def test_delete_empty_container_excpetion(self):
        """Errors from delete_container are captured in the result dict."""
        # NOTE(review): the method name has a typo ("excpetion"); kept as-is
        # so the discovered test name does not change in a doc-only edit.
        mock_conn = self._get_mock_connection()
        mock_conn.delete_container = Mock(side_effect=self.exc)
        expected_r = self._get_expected({
            'action': 'delete_container',
            'success': False,
            'object': None,
            'error': self.exc
        })
        s = SwiftService()
        r = s._delete_empty_container(mock_conn, 'test_c')
        mock_conn.delete_container.assert_called_once_with(
            'test_c', response_dict={}
        )
        self._assertDictEqual(expected_r, r)
class TestSwiftError(testtools.TestCase):
    """Unit tests for the SwiftError exception type."""

    def test_is_exception(self):
        # SwiftError must be raisable/catchable as a standard Exception.
        se = SwiftError(5)
        self.assertIsInstance(se, Exception)

    def test_empty_swifterror_creation(self):
        # With only a value supplied, all optional context attributes
        # default to None and str() renders just the value.
        se = SwiftError(5)
        self.assertEqual(se.value, 5)
        self.assertIsNone(se.container)
        self.assertIsNone(se.obj)
        self.assertIsNone(se.segment)
        self.assertIsNone(se.exception)
        self.assertEqual(str(se), '5')

    def test_swifterror_creation(self):
        # Full construction stores every field, and str() appends the
        # container/object/segment context.
        test_exc = Exception('test exc')
        se = SwiftError(5, 'con', 'obj', 'seg', test_exc)
        self.assertEqual(se.value, 5)
        self.assertEqual(se.container, 'con')
        self.assertEqual(se.obj, 'obj')
        self.assertEqual(se.segment, 'seg')
        self.assertEqual(se.exception, test_exc)
        self.assertEqual(str(se), '5 container:con object:obj segment:seg')
class TestServiceUtils(testtools.TestCase):
    """Tests for option processing and header-splitting helpers."""

    def setUp(self):
        super(TestServiceUtils, self).setUp()
        # Rebuild the defaults under a scrubbed environment so ambient
        # OS_* variables cannot leak into the expectations below.
        with mock.patch.dict(swiftclient.service.environ, clean_os_environ):
            swiftclient.service._default_global_options = \
                swiftclient.service._build_default_global_options()
        self.opts = swiftclient.service._default_global_options.copy()

    def test_process_options_defaults(self):
        # The only actions that should be taken on default options set is
        # to change the auth version to v2.0 and create the os_options dict
        opt_c = self.opts.copy()
        swiftclient.service.process_options(opt_c)
        self.assertIn('os_options', opt_c)
        del opt_c['os_options']
        self.assertEqual(opt_c['auth_version'], '2.0')
        opt_c['auth_version'] = '1.0'
        self.assertEqual(opt_c, self.opts)

    def test_process_options_auth_version(self):
        # auth_version should be set to 2.0
        # if it isn't already set to 3.0
        # and the v1 command line arguments aren't present
        opt_c = self.opts.copy()
        # Check v3 isn't changed
        opt_c['auth_version'] = '3'
        swiftclient.service.process_options(opt_c)
        self.assertEqual(opt_c['auth_version'], '3')
        # Check v1 isn't changed if user, key and auth are set
        opt_c = self.opts.copy()
        opt_c['auth_version'] = '1'
        opt_c['auth'] = True
        opt_c['user'] = True
        opt_c['key'] = True
        swiftclient.service.process_options(opt_c)
        self.assertEqual(opt_c['auth_version'], '1')

    def test_process_options_new_style_args(self):
        # checks new style args are copied to old style
        # when old style don't exist
        opt_c = self.opts.copy()
        opt_c['auth'] = ''
        opt_c['user'] = ''
        opt_c['key'] = ''
        opt_c['os_auth_url'] = 'os_auth'
        opt_c['os_username'] = 'os_user'
        opt_c['os_password'] = 'os_pass'
        swiftclient.service.process_options(opt_c)
        self.assertEqual(opt_c['auth_version'], '2.0')
        self.assertEqual(opt_c['auth'], 'os_auth')
        self.assertEqual(opt_c['user'], 'os_user')
        self.assertEqual(opt_c['key'], 'os_pass')
        # Check old style args are left alone if they exist
        opt_c = self.opts.copy()
        opt_c['auth'] = 'auth'
        opt_c['user'] = 'user'
        opt_c['key'] = 'key'
        opt_c['os_auth_url'] = 'os_auth'
        opt_c['os_username'] = 'os_user'
        opt_c['os_password'] = 'os_pass'
        swiftclient.service.process_options(opt_c)
        self.assertEqual(opt_c['auth_version'], '1.0')
        self.assertEqual(opt_c['auth'], 'auth')
        self.assertEqual(opt_c['user'], 'user')
        self.assertEqual(opt_c['key'], 'key')

    def test_split_headers(self):
        mock_headers = ['color:blue', 'size:large']
        expected = {'Color': 'blue', 'Size': 'large'}
        actual = swiftclient.service.split_headers(mock_headers)
        self.assertEqual(expected, actual)

    def test_split_headers_prefix(self):
        mock_headers = ['color:blue', 'size:large']
        expected = {'Prefix-Color': 'blue', 'Prefix-Size': 'large'}
        actual = swiftclient.service.split_headers(mock_headers, 'prefix-')
        self.assertEqual(expected, actual)

    def test_split_headers_error(self):
        # A header argument without a ':' separator is invalid.
        mock_headers = ['notvalid']
        self.assertRaises(SwiftError, swiftclient.service.split_headers,
                          mock_headers)
class TestSwiftUploadObject(testtools.TestCase):
    """Tests for SwiftUploadObject construction and validation."""

    def setUp(self):
        # Call the base setUp first, per the usual TestCase convention.
        super(TestSwiftUploadObject, self).setUp()
        self.suo = swiftclient.service.SwiftUploadObject

    def test_create_with_string(self):
        # A bare string source doubles as the object name by default.
        suo = self.suo('source')
        self.assertEqual(suo.source, 'source')
        self.assertEqual(suo.object_name, 'source')
        self.assertIsNone(suo.options)
        suo = self.suo('source', 'obj_name')
        self.assertEqual(suo.source, 'source')
        self.assertEqual(suo.object_name, 'obj_name')
        self.assertIsNone(suo.options)
        suo = self.suo('source', 'obj_name', {'opt': '123'})
        self.assertEqual(suo.source, 'source')
        self.assertEqual(suo.object_name, 'obj_name')
        self.assertEqual(suo.options, {'opt': '123'})

    def test_create_with_file(self):
        with tempfile.TemporaryFile() as mock_file:
            # Check error is raised if no object name is provided with a
            # filelike object
            self.assertRaises(SwiftError, self.suo, mock_file)
            # Check that empty strings are invalid object names
            self.assertRaises(SwiftError, self.suo, mock_file, '')
            suo = self.suo(mock_file, 'obj_name')
            self.assertEqual(suo.source, mock_file)
            self.assertEqual(suo.object_name, 'obj_name')
            self.assertIsNone(suo.options)
            suo = self.suo(mock_file, 'obj_name', {'opt': '123'})
            self.assertEqual(suo.source, mock_file)
            self.assertEqual(suo.object_name, 'obj_name')
            self.assertEqual(suo.options, {'opt': '123'})

    def test_create_with_no_source(self):
        suo = self.suo(None, 'obj_name')
        self.assertIsNone(suo.source)
        self.assertEqual(suo.object_name, 'obj_name')
        self.assertIsNone(suo.options)
        # Check error is raised if source is None without an object name
        self.assertRaises(SwiftError, self.suo, None)

    def test_create_with_invalid_source(self):
        # Source can only be None, string or filelike object,
        # check an error is raised with an invalid type.
        self.assertRaises(SwiftError, self.suo, [])
class TestService(testtools.TestCase):
    """Tests for SwiftService.upload argument handling."""

    def test_upload_with_bad_segment_size(self):
        # segment_size must parse as an integer; anything else should
        # surface as a SwiftError from the upload generator.
        for bad in ('ten', '1234X', '100.3'):
            options = {'segment_size': bad}
            try:
                service = SwiftService(options)
                next(service.upload('c', 'o'))
                self.fail('Expected SwiftError when segment_size=%s' % bad)
            except SwiftError as exc:
                self.assertEqual('Segment size should be an integer value',
                                 exc.value)

    @mock.patch('swiftclient.service.stat')
    @mock.patch('swiftclient.service.getmtime', return_value=1.0)
    @mock.patch('swiftclient.service.getsize', return_value=4)
    @mock.patch.object(builtins, 'open', return_value=six.StringIO('asdf'))
    def test_upload_with_relative_path(self, *args, **kwargs):
        # Leading './' or '.\' prefixes must be stripped from the stored
        # object name while the reported path keeps the user's spelling.
        service = SwiftService({})
        objects = [{'path': "./test",
                    'strt_indx': 2},
                   {'path': os.path.join(os.getcwd(), "test"),
                    'strt_indx': 1},
                   {'path': ".\\test",
                    'strt_indx': 2}]
        for obj in objects:
            with mock.patch('swiftclient.service.Connection') as mock_conn:
                mock_conn.return_value.head_object.side_effect = \
                    ClientException('Not Found', http_status=404)
                mock_conn.return_value.put_object.return_value =\
                    'd41d8cd98f00b204e9800998ecf8427e'
                resp_iter = service.upload(
                    'c', [SwiftUploadObject(obj['path'])])
                # list() instead of a redundant identity comprehension.
                responses = list(resp_iter)
                for resp in responses:
                    self.assertTrue(resp['success'])
                self.assertEqual(2, len(responses))
                create_container_resp, upload_obj_resp = responses
                self.assertEqual(create_container_resp['action'],
                                 'create_container')
                self.assertEqual(upload_obj_resp['action'],
                                 'upload_object')
                self.assertEqual(upload_obj_resp['object'],
                                 obj['path'][obj['strt_indx']:])
                self.assertEqual(upload_obj_resp['path'], obj['path'])
class TestServiceUpload(testtools.TestCase):
    """Tests for SwiftService upload internals (segment and object jobs)."""

    def _assertDictEqual(self, a, b, m=None):
        # assertDictEqual is not available in py2.6 so use a shallow check
        # instead
        if not m:
            m = '{0} != {1}'.format(a, b)
        if hasattr(self, 'assertDictEqual'):
            self.assertDictEqual(a, b, m)
        else:
            self.assertIsInstance(a, dict, m)
            self.assertIsInstance(b, dict, m)
            self.assertEqual(len(a), len(b), m)
            for k, v in a.items():
                self.assertIn(k, b, m)
                self.assertEqual(b[k], v, m)

    def test_upload_segment_job(self):
        """A single segment is PUT to <container>_segments with its size."""
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 10)
            f.write(b'b' * 10)
            f.write(b'c' * 10)
            f.flush()
            # Mock the connection to return an empty etag. This
            # skips etag validation which would fail as the LengthWrapper
            # isn't read from.
            mock_conn = mock.Mock()
            mock_conn.put_object.return_value = ''
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            expected_r = {
                'action': 'upload_segment',
                'for_object': 'test_o',
                'segment_index': 2,
                'segment_size': 10,
                'segment_location': '/test_c_segments/test_s_1',
                'log_line': 'test_o segment 2',
                'success': True,
                'response_dict': {},
                'segment_etag': '',
                'attempts': 2,
            }
            s = SwiftService()
            r = s._upload_segment_job(conn=mock_conn,
                                      path=f.name,
                                      container='test_c',
                                      segment_name='test_s_1',
                                      segment_start=10,
                                      segment_size=10,
                                      segment_index=2,
                                      obj_name='test_o',
                                      options={'segment_container': None,
                                               'checksum': True})
            self._assertDictEqual(r, expected_r)
            self.assertEqual(mock_conn.put_object.call_count, 1)
            mock_conn.put_object.assert_called_with('test_c_segments',
                                                    'test_s_1',
                                                    mock.ANY,
                                                    content_length=10,
                                                    response_dict={})
            contents = mock_conn.put_object.call_args[0][2]
            self.assertIsInstance(contents, utils.LengthWrapper)
            self.assertEqual(len(contents), 10)
            # This read forces the LengthWrapper to calculate the md5
            # for the read content.
            self.assertEqual(contents.read(), b'b' * 10)
            self.assertEqual(contents.get_md5sum(), md5(b'b' * 10).hexdigest())

    def test_etag_mismatch_with_ignore_checksum(self):
        """With checksum disabled a bad response etag is not an error."""
        def _consuming_conn(*a, **kw):
            contents = a[2]
            contents.read()  # Force md5 calculation
            return 'badresponseetag'
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 10)
            f.write(b'b' * 10)
            f.write(b'c' * 10)
            f.flush()
            mock_conn = mock.Mock()
            mock_conn.put_object.side_effect = _consuming_conn
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            r = s._upload_segment_job(conn=mock_conn,
                                      path=f.name,
                                      container='test_c',
                                      segment_name='test_s_1',
                                      segment_start=10,
                                      segment_size=10,
                                      segment_index=2,
                                      obj_name='test_o',
                                      options={'segment_container': None,
                                               'checksum': False})
            self.assertNotIn('error', r)
            self.assertEqual(mock_conn.put_object.call_count, 1)
            mock_conn.put_object.assert_called_with('test_c_segments',
                                                    'test_s_1',
                                                    mock.ANY,
                                                    content_length=10,
                                                    response_dict={})
            contents = mock_conn.put_object.call_args[0][2]
            # Check that md5sum is not calculated.
            self.assertEqual(contents.get_md5sum(), '')

    def test_upload_segment_job_etag_mismatch(self):
        """With checksum enabled a bad response etag yields an md5 error."""
        def _consuming_conn(*a, **kw):
            contents = a[2]
            contents.read()  # Force md5 calculation
            return 'badresponseetag'
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 10)
            f.write(b'b' * 10)
            f.write(b'c' * 10)
            f.flush()
            mock_conn = mock.Mock()
            mock_conn.put_object.side_effect = _consuming_conn
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            r = s._upload_segment_job(conn=mock_conn,
                                      path=f.name,
                                      container='test_c',
                                      segment_name='test_s_1',
                                      segment_start=10,
                                      segment_size=10,
                                      segment_index=2,
                                      obj_name='test_o',
                                      options={'segment_container': None,
                                               'checksum': True})
            self.assertIn('error', r)
            self.assertIn('md5 mismatch', str(r['error']))
            self.assertEqual(mock_conn.put_object.call_count, 1)
            mock_conn.put_object.assert_called_with('test_c_segments',
                                                    'test_s_1',
                                                    mock.ANY,
                                                    content_length=10,
                                                    response_dict={})
            contents = mock_conn.put_object.call_args[0][2]
            self.assertEqual(contents.get_md5sum(), md5(b'b' * 10).hexdigest())

    def test_upload_object_job_file(self):
        """A named file is uploaded wrapped in an md5-tracking LengthWrapper."""
        # Uploading a file results in the file object being wrapped in a
        # LengthWrapper. This test sets the options in such a way that much
        # of _upload_object_job is skipped bringing the critical path down
        # to around 60 lines to ease testing.
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            expected_r = {
                'action': 'upload_object',
                'attempts': 2,
                'container': 'test_c',
                'headers': {},
                'large_object': False,
                'object': 'test_o',
                'response_dict': {},
                'status': 'uploaded',
                'success': True,
            }
            expected_mtime = float(os.path.getmtime(f.name))
            mock_conn = mock.Mock()
            mock_conn.put_object.return_value = ''
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            r = s._upload_object_job(conn=mock_conn,
                                     container='test_c',
                                     source=f.name,
                                     obj='test_o',
                                     options={'changed': False,
                                              'skip_identical': False,
                                              'leave_segments': True,
                                              'header': '',
                                              'segment_size': 0,
                                              'checksum': True})
            mtime = float(r['headers']['x-object-meta-mtime'])
            self.assertAlmostEqual(mtime, expected_mtime, delta=0.5)
            del r['headers']['x-object-meta-mtime']
            self.assertEqual(r['path'], f.name)
            del r['path']
            self._assertDictEqual(r, expected_r)
            self.assertEqual(mock_conn.put_object.call_count, 1)
            mock_conn.put_object.assert_called_with('test_c', 'test_o',
                                                    mock.ANY,
                                                    content_length=30,
                                                    headers={},
                                                    response_dict={})
            contents = mock_conn.put_object.call_args[0][2]
            self.assertIsInstance(contents, utils.LengthWrapper)
            self.assertEqual(len(contents), 30)
            # This read forces the LengthWrapper to calculate the md5
            # for the read content. This also checks that LengthWrapper was
            # initialized with md5=True
            self.assertEqual(contents.read(), b'a' * 30)
            self.assertEqual(contents.get_md5sum(), md5(b'a' * 30).hexdigest())

    def test_upload_object_job_stream(self):
        """A stream source is wrapped as a chunked ReadableToIterable."""
        # Streams are wrapped as ReadableToIterable
        with tempfile.TemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            f.seek(0)
            expected_r = {
                'action': 'upload_object',
                'attempts': 2,
                'container': 'test_c',
                'headers': {},
                'large_object': False,
                'object': 'test_o',
                'response_dict': {},
                'status': 'uploaded',
                'success': True,
                'path': None,
            }
            expected_mtime = float(time.time())
            mock_conn = mock.Mock()
            mock_conn.put_object.return_value = ''
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            r = s._upload_object_job(conn=mock_conn,
                                     container='test_c',
                                     source=f,
                                     obj='test_o',
                                     options={'changed': False,
                                              'skip_identical': False,
                                              'leave_segments': True,
                                              'header': '',
                                              'segment_size': 0,
                                              'checksum': True})
            mtime = float(r['headers']['x-object-meta-mtime'])
            self.assertAlmostEqual(mtime, expected_mtime, delta=0.5)
            del r['headers']['x-object-meta-mtime']
            self._assertDictEqual(r, expected_r)
            self.assertEqual(mock_conn.put_object.call_count, 1)
            mock_conn.put_object.assert_called_with('test_c', 'test_o',
                                                    mock.ANY,
                                                    content_length=None,
                                                    headers={},
                                                    response_dict={})
            contents = mock_conn.put_object.call_args[0][2]
            self.assertIsInstance(contents, utils.ReadableToIterable)
            self.assertEqual(contents.chunk_size, 65536)
            # next retrieves the first chunk of the stream or len(chunk_size)
            # or less, it also forces the md5 to be calculated.
            self.assertEqual(next(contents), b'a' * 30)
            self.assertEqual(contents.get_md5sum(), md5(b'a' * 30).hexdigest())

    def test_upload_object_job_etag_mismatch(self):
        """An etag mismatch on object upload is reported as an md5 error."""
        # The etag test for both streams and files use the same code
        # so only one test should be needed.
        def _consuming_conn(*a, **kw):
            contents = a[2]
            contents.read()  # Force md5 calculation
            return 'badresponseetag'
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            mock_conn = mock.Mock()
            mock_conn.put_object.side_effect = _consuming_conn
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            r = s._upload_object_job(conn=mock_conn,
                                     container='test_c',
                                     source=f.name,
                                     obj='test_o',
                                     options={'changed': False,
                                              'skip_identical': False,
                                              'leave_segments': True,
                                              'header': '',
                                              'segment_size': 0,
                                              'checksum': True})
            self.assertEqual(r['success'], False)
            self.assertIn('error', r)
            self.assertIn('md5 mismatch', str(r['error']))
            self.assertEqual(mock_conn.put_object.call_count, 1)
            expected_headers = {'x-object-meta-mtime': mock.ANY}
            mock_conn.put_object.assert_called_with('test_c', 'test_o',
                                                    mock.ANY,
                                                    content_length=30,
                                                    headers=expected_headers,
                                                    response_dict={})
            contents = mock_conn.put_object.call_args[0][2]
            self.assertEqual(contents.get_md5sum(), md5(b'a' * 30).hexdigest())

    def test_upload_object_job_identical_etag(self):
        """Matching length and etag with skip_identical skips the upload."""
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            mock_conn = mock.Mock()
            mock_conn.head_object.return_value = {
                'content-length': 30,
                'etag': md5(b'a' * 30).hexdigest()}
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            r = s._upload_object_job(conn=mock_conn,
                                     container='test_c',
                                     source=f.name,
                                     obj='test_o',
                                     options={'changed': False,
                                              'skip_identical': True,
                                              'leave_segments': True,
                                              'header': '',
                                              'segment_size': 0})
            self.assertTrue(r['success'])
            self.assertIn('status', r)
            self.assertEqual(r['status'], 'skipped-identical')
            self.assertEqual(mock_conn.put_object.call_count, 0)
            self.assertEqual(mock_conn.head_object.call_count, 1)
            mock_conn.head_object.assert_called_with('test_c', 'test_o')

    def test_upload_object_job_identical_slo_with_nesting(self):
        """An SLO whose nested manifest md5s match local data is skipped."""
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            seg_etag = md5(b'a' * 10).hexdigest()
            submanifest = "[%s]" % ",".join(
                ['{"bytes":10,"hash":"%s"}' % seg_etag] * 2)
            submanifest_etag = md5(seg_etag.encode('ascii') * 2).hexdigest()
            manifest = "[%s]" % ",".join([
                '{"sub_slo":true,"name":"/test_c_segments/test_sub_slo",'
                '"bytes":20,"hash":"%s"}' % submanifest_etag,
                '{"bytes":10,"hash":"%s"}' % seg_etag])
            mock_conn = mock.Mock()
            mock_conn.head_object.return_value = {
                'x-static-large-object': True,
                'content-length': 30,
                'etag': md5(submanifest_etag.encode('ascii') +
                            seg_etag.encode('ascii')).hexdigest()}
            mock_conn.get_object.side_effect = [
                ({}, manifest.encode('ascii')),
                ({}, submanifest.encode('ascii'))]
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            r = s._upload_object_job(conn=mock_conn,
                                     container='test_c',
                                     source=f.name,
                                     obj='test_o',
                                     options={'changed': False,
                                              'skip_identical': True,
                                              'leave_segments': True,
                                              'header': '',
                                              'segment_size': 10})
            self.assertIsNone(r.get('error'))
            self.assertTrue(r['success'])
            self.assertEqual('skipped-identical', r.get('status'))
            self.assertEqual(0, mock_conn.put_object.call_count)
            self.assertEqual([mock.call('test_c', 'test_o')],
                             mock_conn.head_object.mock_calls)
            self.assertEqual([
                mock.call('test_c', 'test_o',
                          query_string='multipart-manifest=get'),
                mock.call('test_c_segments', 'test_sub_slo',
                          query_string='multipart-manifest=get'),
            ], mock_conn.get_object.mock_calls)

    def test_upload_object_job_identical_dlo(self):
        """A DLO whose listed segment md5s match local data is skipped."""
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            segment_etag = md5(b'a' * 10).hexdigest()
            mock_conn = mock.Mock()
            mock_conn.head_object.return_value = {
                'x-object-manifest': 'test_c_segments/test_o/prefix',
                'content-length': 30,
                'etag': md5(segment_etag.encode('ascii') * 3).hexdigest()}
            mock_conn.get_container.side_effect = [
                (None, [{"bytes": 10, "hash": segment_etag,
                         "name": "test_o/prefix/00"},
                        {"bytes": 10, "hash": segment_etag,
                         "name": "test_o/prefix/01"}]),
                (None, [{"bytes": 10, "hash": segment_etag,
                         "name": "test_o/prefix/02"}]),
                (None, {})]
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            s = SwiftService()
            with mock.patch('swiftclient.service.get_conn',
                            return_value=mock_conn):
                r = s._upload_object_job(conn=mock_conn,
                                         container='test_c',
                                         source=f.name,
                                         obj='test_o',
                                         options={'changed': False,
                                                  'skip_identical': True,
                                                  'leave_segments': True,
                                                  'header': '',
                                                  'segment_size': 10})
            self.assertIsNone(r.get('error'))
            self.assertTrue(r['success'])
            self.assertEqual('skipped-identical', r.get('status'))
            self.assertEqual(0, mock_conn.put_object.call_count)
            self.assertEqual(1, mock_conn.head_object.call_count)
            self.assertEqual(3, mock_conn.get_container.call_count)
            mock_conn.head_object.assert_called_with('test_c', 'test_o')
            # Segment listings are paginated by marker until exhausted.
            expected = [
                mock.call('test_c_segments', prefix='test_o/prefix',
                          marker='', delimiter=None),
                mock.call('test_c_segments', prefix='test_o/prefix',
                          marker="test_o/prefix/01", delimiter=None),
                mock.call('test_c_segments', prefix='test_o/prefix',
                          marker="test_o/prefix/02", delimiter=None),
            ]
            mock_conn.get_container.assert_has_calls(expected)
class TestServiceDownload(testtools.TestCase):
    def setUp(self):
        """Start from default local options with actual downloads disabled."""
        super(TestServiceDownload, self).setUp()
        self.opts = swiftclient.service._default_local_options.copy()
        self.opts['no_download'] = True
        # Canonical body/etag/length reused by most tests in this class.
        self.obj_content = b'c' * 10
        self.obj_etag = md5(self.obj_content).hexdigest()
        self.obj_len = len(self.obj_content)
    def _readbody(self):
        """Generator standing in for a swift GET response body iterator."""
        yield self.obj_content
def _assertDictEqual(self, a, b, m=None):
# assertDictEqual is not available in py2.6 so use a shallow check
# instead
if not m:
m = '{0} != {1}'.format(a, b)
if hasattr(self, 'assertDictEqual'):
self.assertDictEqual(a, b, m)
else:
self.assertTrue(isinstance(a, dict), m)
self.assertTrue(isinstance(b, dict), m)
self.assertEqual(len(a), len(b), m)
for k, v in a.items():
self.assertIn(k, b, m)
self.assertEqual(b[k], v, m)
def test_download(self):
service = SwiftService()
with mock.patch('swiftclient.service.Connection') as mock_conn:
header = {'content-length': self.obj_len,
'etag': self.obj_etag}
mock_conn.get_object.return_value = header, self._readbody()
resp = service._download_object_job(mock_conn,
'c',
'test',
self.opts)
self.assertTrue(resp['success'])
self.assertEqual(resp['action'], 'download_object')
self.assertEqual(resp['object'], 'test')
self.assertEqual(resp['path'], 'test')
def test_download_with_output_dir(self):
service = SwiftService()
with mock.patch('swiftclient.service.Connection') as mock_conn:
header = {'content-length': self.obj_len,
'etag': self.obj_etag}
mock_conn.get_object.return_value = header, self._readbody()
options = self.opts.copy()
options['out_directory'] = 'temp_dir'
resp = service._download_object_job(mock_conn,
'c',
'example/test',
options)
self.assertTrue(resp['success'])
self.assertEqual(resp['action'], 'download_object')
self.assertEqual(resp['object'], 'example/test')
self.assertEqual(resp['path'], 'temp_dir/example/test')
def test_download_with_remove_prefix(self):
service = SwiftService()
with mock.patch('swiftclient.service.Connection') as mock_conn:
header = {'content-length': self.obj_len,
'etag': self.obj_etag}
mock_conn.get_object.return_value = header, self._readbody()
options = self.opts.copy()
options['prefix'] = 'example/'
options['remove_prefix'] = True
resp = service._download_object_job(mock_conn,
'c',
'example/test',
options)
self.assertTrue(resp['success'])
self.assertEqual(resp['action'], 'download_object')
self.assertEqual(resp['object'], 'example/test')
self.assertEqual(resp['path'], 'test')
def test_download_with_remove_prefix_and_remove_slashes(self):
service = SwiftService()
with mock.patch('swiftclient.service.Connection') as mock_conn:
header = {'content-length': self.obj_len,
'etag': self.obj_etag}
mock_conn.get_object.return_value = header, self._readbody()
options = self.opts.copy()
options['prefix'] = 'example'
options['remove_prefix'] = True
resp = service._download_object_job(mock_conn,
'c',
'example/test',
options)
self.assertTrue(resp['success'])
self.assertEqual(resp['action'], 'download_object')
self.assertEqual(resp['object'], 'example/test')
self.assertEqual(resp['path'], 'test')
def test_download_with_output_dir_and_remove_prefix(self):
service = SwiftService()
with mock.patch('swiftclient.service.Connection') as mock_conn:
header = {'content-length': self.obj_len,
'etag': self.obj_etag}
mock_conn.get_object.return_value = header, self._readbody()
options = self.opts.copy()
options['prefix'] = 'example'
options['out_directory'] = 'new/dir'
options['remove_prefix'] = True
resp = service._download_object_job(mock_conn,
'c',
'example/test',
options)
self.assertTrue(resp['success'])
self.assertEqual(resp['action'], 'download_object')
self.assertEqual(resp['object'], 'example/test')
self.assertEqual(resp['path'], 'new/dir/test')
    def test_download_object_job_skip_identical(self):
        """skip_identical sends If-None-Match; a 304 surfaces as an error."""
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            err = swiftclient.ClientException('Object GET failed',
                                              http_status=304)
            def fake_get(*args, **kwargs):
                kwargs['response_dict']['headers'] = {}
                raise err
            mock_conn = mock.Mock()
            mock_conn.get_object.side_effect = fake_get
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            expected_r = {
                'action': 'download_object',
                'container': 'test_c',
                'object': 'test_o',
                'success': False,
                'error': err,
                'response_dict': {'headers': {}},
                'path': 'test_o',
                'pseudodir': False,
                'attempts': 2,
            }
            s = SwiftService()
            r = s._download_object_job(conn=mock_conn,
                                       container='test_c',
                                       obj='test_o',
                                       options={'out_file': f.name,
                                                'out_directory': None,
                                                'prefix': None,
                                                'remove_prefix': False,
                                                'header': {},
                                                'yes_all': False,
                                                'skip_identical': True})
            self._assertDictEqual(r, expected_r)
            self.assertEqual(mock_conn.get_object.call_count, 1)
            # The md5 of the local out_file is offered as If-None-Match.
            mock_conn.get_object.assert_called_with(
                'test_c',
                'test_o',
                resp_chunk_size=65536,
                headers={'If-None-Match': md5(b'a' * 30).hexdigest()},
                query_string='multipart-manifest=get',
                response_dict=expected_r['response_dict'])
    def test_download_object_job_skip_identical_dlo(self):
        """For a DLO the aggregate segment md5s decide identical-skip."""
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            on_disk_md5 = md5(b'a' * 30).hexdigest()
            segment_md5 = md5(b'a' * 10).hexdigest()
            mock_conn = mock.Mock()
            mock_conn.get_object.return_value = (
                {'x-object-manifest': 'test_c_segments/test_o/prefix'}, [b''])
            # Three segments of 'a' * 10 listed across two pages; the empty
            # final page ends pagination.
            mock_conn.get_container.side_effect = [
                (None, [{'name': 'test_o/prefix/1',
                         'bytes': 10, 'hash': segment_md5},
                        {'name': 'test_o/prefix/2',
                         'bytes': 10, 'hash': segment_md5}]),
                (None, [{'name': 'test_o/prefix/3',
                         'bytes': 10, 'hash': segment_md5}]),
                (None, [])]
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            expected_r = {
                'action': 'download_object',
                'container': 'test_c',
                'object': 'test_o',
                'success': False,
                'response_dict': {},
                'path': 'test_o',
                'pseudodir': False,
                'attempts': 2,
            }
            s = SwiftService()
            with mock.patch('swiftclient.service.get_conn',
                            return_value=mock_conn):
                r = s._download_object_job(conn=mock_conn,
                                           container='test_c',
                                           obj='test_o',
                                           options={'out_file': f.name,
                                                    'out_directory': None,
                                                    'prefix': None,
                                                    'remove_prefix': False,
                                                    'header': {},
                                                    'yes_all': False,
                                                    'skip_identical': True})
            err = r.pop('error')
            self.assertEqual("Large object is identical", err.msg)
            self.assertEqual(304, err.http_status)
            self._assertDictEqual(r, expected_r)
            self.assertEqual(mock_conn.get_object.call_count, 1)
            mock_conn.get_object.assert_called_with(
                'test_c',
                'test_o',
                resp_chunk_size=65536,
                headers={'If-None-Match': on_disk_md5},
                query_string='multipart-manifest=get',
                response_dict=expected_r['response_dict'])
            self.assertEqual(mock_conn.get_container.mock_calls, [
                mock.call('test_c_segments',
                          delimiter=None,
                          prefix='test_o/prefix',
                          marker=''),
                mock.call('test_c_segments',
                          delimiter=None,
                          prefix='test_o/prefix',
                          marker='test_o/prefix/2'),
                mock.call('test_c_segments',
                          delimiter=None,
                          prefix='test_o/prefix',
                          marker='test_o/prefix/3')])
    def test_download_object_job_skip_identical_nested_slo(self):
        """SLO manifests (with sub-SLOs) are walked to compare segment md5s."""
        with tempfile.NamedTemporaryFile() as f:
            f.write(b'a' * 30)
            f.flush()
            on_disk_md5 = md5(b'a' * 30).hexdigest()
            seg_etag = md5(b'a' * 10).hexdigest()
            submanifest = "[%s]" % ",".join(
                ['{"bytes":10,"hash":"%s"}' % seg_etag] * 2)
            submanifest_etag = md5(seg_etag.encode('ascii') * 2).hexdigest()
            manifest = "[%s]" % ",".join([
                '{"sub_slo":true,"name":"/test_c_segments/test_sub_slo",'
                '"bytes":20,"hash":"%s"}' % submanifest_etag,
                '{"bytes":10,"hash":"%s"}' % seg_etag])
            # First GET returns the top-level manifest, second the sub-SLO.
            mock_conn = mock.Mock()
            mock_conn.get_object.side_effect = [
                ({'x-static-large-object': True,
                  'content-length': 30,
                  'etag': md5(submanifest_etag.encode('ascii') +
                              seg_etag.encode('ascii')).hexdigest()},
                 [manifest.encode('ascii')]),
                ({'x-static-large-object': True,
                  'content-length': 20,
                  'etag': submanifest_etag},
                 submanifest.encode('ascii'))]
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            expected_r = {
                'action': 'download_object',
                'container': 'test_c',
                'object': 'test_o',
                'success': False,
                'response_dict': {},
                'path': 'test_o',
                'pseudodir': False,
                'attempts': 2,
            }
            s = SwiftService()
            with mock.patch('swiftclient.service.get_conn',
                            return_value=mock_conn):
                r = s._download_object_job(conn=mock_conn,
                                           container='test_c',
                                           obj='test_o',
                                           options={'out_file': f.name,
                                                    'out_directory': None,
                                                    'prefix': None,
                                                    'remove_prefix': False,
                                                    'header': {},
                                                    'yes_all': False,
                                                    'skip_identical': True})
            err = r.pop('error')
            self.assertEqual("Large object is identical", err.msg)
            self.assertEqual(304, err.http_status)
            self._assertDictEqual(r, expected_r)
            self.assertEqual(mock_conn.get_object.mock_calls, [
                mock.call('test_c',
                          'test_o',
                          resp_chunk_size=65536,
                          headers={'If-None-Match': on_disk_md5},
                          query_string='multipart-manifest=get',
                          response_dict={}),
                mock.call('test_c_segments',
                          'test_sub_slo',
                          query_string='multipart-manifest=get')])
    def test_download_object_job_skip_identical_diff_dlo(self):
        """A DLO whose content differs from disk is downloaded normally."""
        with tempfile.NamedTemporaryFile() as f:
            # Local file is 31 bytes, remote DLO is 30 -- not identical.
            f.write(b'a' * 30)
            f.write(b'b')
            f.flush()
            on_disk_md5 = md5(b'a' * 30 + b'b').hexdigest()
            segment_md5 = md5(b'a' * 10).hexdigest()
            mock_conn = mock.Mock()
            mock_conn.get_object.side_effect = [
                ({'x-object-manifest': 'test_c_segments/test_o/prefix'},
                 [b'']),
                ({'x-object-manifest': 'test_c_segments/test_o/prefix'},
                 [b'a' * 30])]
            mock_conn.get_container.side_effect = [
                (None, [{'name': 'test_o/prefix/1',
                         'bytes': 10, 'hash': segment_md5},
                        {'name': 'test_o/prefix/2',
                         'bytes': 10, 'hash': segment_md5}]),
                (None, [{'name': 'test_o/prefix/3',
                         'bytes': 10, 'hash': segment_md5}]),
                (None, [])]
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            type(mock_conn).auth_end_time = mock.PropertyMock(return_value=14)
            expected_r = {
                'action': 'download_object',
                'container': 'test_c',
                'object': 'test_o',
                'success': True,
                'response_dict': {},
                'path': 'test_o',
                'pseudodir': False,
                'read_length': 30,
                'attempts': 2,
                'start_time': 0,
                'headers_receipt': 1,
                'finish_time': 2,
                'auth_end_time': mock_conn.auth_end_time,
            }
            options = self.opts.copy()
            options['out_file'] = f.name
            options['skip_identical'] = True
            s = SwiftService()
            # time() is patched to yield 0, 1, 2 so the timing fields in
            # expected_r are deterministic.
            with mock.patch('swiftclient.service.time', side_effect=range(3)):
                with mock.patch('swiftclient.service.get_conn',
                                return_value=mock_conn):
                    r = s._download_object_job(
                        conn=mock_conn,
                        container='test_c',
                        obj='test_o',
                        options=options)
            self._assertDictEqual(r, expected_r)
            self.assertEqual(mock_conn.get_container.mock_calls, [
                mock.call('test_c_segments',
                          delimiter=None,
                          prefix='test_o/prefix',
                          marker=''),
                mock.call('test_c_segments',
                          delimiter=None,
                          prefix='test_o/prefix',
                          marker='test_o/prefix/2'),
                mock.call('test_c_segments',
                          delimiter=None,
                          prefix='test_o/prefix',
                          marker='test_o/prefix/3')])
            # Second GET is the real download (no manifest query string).
            self.assertEqual(mock_conn.get_object.mock_calls, [
                mock.call('test_c',
                          'test_o',
                          resp_chunk_size=65536,
                          headers={'If-None-Match': on_disk_md5},
                          query_string='multipart-manifest=get',
                          response_dict={}),
                mock.call('test_c',
                          'test_o',
                          resp_chunk_size=65536,
                          headers={'If-None-Match': on_disk_md5},
                          response_dict={})])
    def test_download_object_job_skip_identical_diff_nested_slo(self):
        """Download is NOT skipped when a nested SLO differs from disk.

        The local file holds 29 bytes while the remote static large
        object (whose manifest references a sub-SLO) is 30 bytes long,
        so after walking both manifests the object body is fetched.
        """
        with tempfile.NamedTemporaryFile() as f:
            # On-disk copy: one byte shorter than the remote object.
            f.write(b'a' * 29)
            f.flush()
            on_disk_md5 = md5(b'a' * 29).hexdigest()
            seg_etag = md5(b'a' * 10).hexdigest()
            # Sub-SLO manifest: two 10-byte segments.
            submanifest = "[%s]" % ",".join(
                ['{"bytes":10,"hash":"%s"}' % seg_etag] * 2)
            submanifest_etag = md5(seg_etag.encode('ascii') * 2).hexdigest()
            # Top-level manifest: the sub-SLO plus one more 10-byte segment.
            manifest = "[%s]" % ",".join([
                '{"sub_slo":true,"name":"/test_c_segments/test_sub_slo",'
                '"bytes":20,"hash":"%s"}' % submanifest_etag,
                '{"bytes":10,"hash":"%s"}' % seg_etag])
            mock_conn = mock.Mock()
            # get_object responses, in call order: top-level manifest,
            # sub-SLO manifest, then the full object body.
            mock_conn.get_object.side_effect = [
                ({'x-static-large-object': True,
                  'content-length': 30,
                  'etag': md5(submanifest_etag.encode('ascii') +
                              seg_etag.encode('ascii')).hexdigest()},
                 [manifest.encode('ascii')]),
                # NOTE(review): this body is bare bytes rather than a
                # one-element list like the others — presumably it is
                # never iterated chunk-wise by the code under test;
                # confirm against _download_object_job.
                ({'x-static-large-object': True,
                  'content-length': 20,
                  'etag': submanifest_etag},
                 submanifest.encode('ascii')),
                ({'x-static-large-object': True,
                  'content-length': 30,
                  'etag': md5(submanifest_etag.encode('ascii') +
                              seg_etag.encode('ascii')).hexdigest()},
                 [b'a' * 30])]
            type(mock_conn).attempts = mock.PropertyMock(return_value=2)
            type(mock_conn).auth_end_time = mock.PropertyMock(return_value=14)
            # Expected result dict; times 0/1/2 come from the patched
            # time() below yielding range(3).
            expected_r = {
                'action': 'download_object',
                'container': 'test_c',
                'object': 'test_o',
                'success': True,
                'response_dict': {},
                'path': 'test_o',
                'pseudodir': False,
                'read_length': 30,
                'attempts': 2,
                'start_time': 0,
                'headers_receipt': 1,
                'finish_time': 2,
                'auth_end_time': mock_conn.auth_end_time,
            }
            options = self.opts.copy()
            options['out_file'] = f.name
            options['skip_identical'] = True
            s = SwiftService()
            with mock.patch('swiftclient.service.time', side_effect=range(3)):
                with mock.patch('swiftclient.service.get_conn',
                                return_value=mock_conn):
                    r = s._download_object_job(
                        conn=mock_conn,
                        container='test_c',
                        obj='test_o',
                        options=options)
            self._assertDictEqual(r, expected_r)
            # The manifest fetches carry If-None-Match so an identical
            # object would short-circuit; here the etags differ, so the
            # third call retrieves the body.
            self.assertEqual(mock_conn.get_object.mock_calls, [
                mock.call('test_c',
                          'test_o',
                          resp_chunk_size=65536,
                          headers={'If-None-Match': on_disk_md5},
                          query_string='multipart-manifest=get',
                          response_dict={}),
                mock.call('test_c_segments',
                          'test_sub_slo',
                          query_string='multipart-manifest=get'),
                mock.call('test_c',
                          'test_o',
                          resp_chunk_size=65536,
                          headers={'If-None-Match': on_disk_md5},
                          response_dict={})])
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
# Pinned so formatting results are reproducible across CI runs.
BLACK_VERSION = "black==19.10b0"
# Paths checked by `lint` and reformatted by `blacken`.
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
# Interpreter used for the single-version sessions (lint, docs, cover, ...).
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Absolute path of the directory containing this noxfile.
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
    "unit",
    "system",
    "cover",
    "lint",
    "lint_setup_py",
    "blacken",
    "docs",
]
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
    """Check formatting with black and lint with flake8.

    Fails when flake8 reports problems or when black would reformat
    any of the configured paths.
    """
    session.install("flake8", BLACK_VERSION)
    session.run("black", "--check", *BLACK_PATHS)
    session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
    """Reformat all configured paths in place with the pinned black."""
    session.install(BLACK_VERSION)
    session.run("black", *BLACK_PATHS)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
    """Check that setup.py's long description renders as valid RST."""
    session.install("docutils", "pygments")
    check_cmd = ("python", "setup.py", "check", "--restructuredtext", "--strict")
    session.run(*check_cmd)
def default(session):
    """Install test dependencies and run the unit tests with coverage.

    Shared implementation for every interpreter-specific `unit` session;
    installs this package in-place (editable) with the pandas extra.
    """
    constraints = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    test_deps = [
        "mock",
        "asyncmock",
        "pytest",
        "pytest-cov",
        "pytest-asyncio",
    ]
    session.install(*test_deps, "-c", constraints)
    session.install("-e", ".[pandas]", "-c", constraints)
    # Run py.test against the unit tests, appending to coverage data.
    session.run(
        "py.test",
        "--quiet",
        f"--junitxml=unit_{session.python}_sponge_log.xml",
        "--cov=google",
        "--cov=tests/unit",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit"),
        *session.posargs,
    )
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
    """Run the unit test suite on every supported interpreter."""
    default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
    """Run the system test suite (opt-out via RUN_SYSTEM_TESTS)."""
    constraints = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    single_file = os.path.join("tests", "system.py")
    test_folder = os.path.join("tests", "system")
    # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
    if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
        session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
    # pyopenssl is only needed when exercising mTLS.
    if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
        session.install("pyopenssl")
    # Sanity check: only run tests if found.
    targets = [path for path in (single_file, test_folder)
               if os.path.exists(path)]
    if not targets:
        session.skip("System tests were not found")
    # Use pre-release gRPC for system tests.
    session.install("--pre", "grpcio")
    # Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
    session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints)
    session.install("-e", ".[pandas]", "-c", constraints)
    # Run py.test against whichever system-test targets exist
    # (single file first, then the folder — same order as before).
    for target in targets:
        session.run(
            "py.test",
            "--quiet",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            target,
            *session.posargs,
        )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
    """Aggregate unit-test coverage and enforce the minimum threshold.

    Reports the coverage collected by the unit sessions (system runs do
    not contribute) and then erases the accumulated data.
    """
    session.install("coverage", "pytest-cov")
    session.run("coverage", "report", "--show-missing", "--fail-under=99")
    session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
    """Build the HTML documentation with Sphinx."""
    session.install("-e", ".")
    session.install("sphinx==4.0.1", "alabaster", "recommonmark")
    build_dir = os.path.join("docs", "_build")
    shutil.rmtree(build_dir, ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join(build_dir, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_dir, "html", ""),
    )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
    """Build the DocFX YAML files used by the cloud reference docs."""
    session.install("-e", ".")
    session.install(
        "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
    )
    build_dir = os.path.join("docs", "_build")
    shutil.rmtree(build_dir, ignore_errors=True)
    # Override the configured extensions so docfx_yaml drives the build.
    extensions = ",".join([
        "sphinx.ext.autodoc",
        "sphinx.ext.autosummary",
        "docfx_yaml.extension",
        "sphinx.ext.intersphinx",
        "sphinx.ext.coverage",
        "sphinx.ext.napoleon",
        "sphinx.ext.todo",
        "sphinx.ext.viewcode",
        "recommonmark",
    ])
    session.run(
        "sphinx-build",
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-D",
        "extensions=" + extensions,
        "-b",
        "html",
        "-d",
        os.path.join(build_dir, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_dir, "html", ""),
    )
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.test import TestCase
from . import *
from ..templatetags.introspection import *
from ..templatetags.markup import *
from ..templatetags.perms import *
from ..templatetags.breadcrumbs import *
from ..templatetags.avatar import *
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.1'
class JoinStringsTemplateTagTestCase(TestCase):
    """Tests for the "join" templatetag."""

    def test_join_list_with_empty_string(self):
        """The "join" templatetag should drop empty/invalid strings."""
        self.assertEqual("a_b_d", join("_", "a", "b", "", "d"))
class SplitFilterTestCase(TestCase):
    """Tests for the "split" filter."""

    def test_split_on_string(self):
        """Splitting a path string on "/" should yield its components."""
        expected = ["a", "path", "to", "my", "folder"]
        self.assertEqual(expected, split("a/path/to/my/folder", '/'))
class StartsWithFilterTestCase(TestCase):
    """Tests for the "startswith" filter."""

    def test_startswith_on_string(self):
        """The filter should match prefixes only, not substrings."""
        self.assertEqual(True, startswith("SelectCar", 'Select'))
        self.assertEqual(False, startswith("InputSelect", 'Select'))
class EndsWithFilterTestCase(TestCase):
    """Tests for the "endswith" filter."""

    def test_endswith_on_string(self):
        """The filter should match suffixes only, not substrings."""
        self.assertEqual(False, endswith("SelectCar", 'Select'))
        self.assertEqual(True, endswith("InputSelect", 'Select'))
class DiffFilterTestCase(TestCase):
    """Tests for the "diff" filter."""

    def test_diff_with_number(self):
        """Subtracting a plain number should work."""
        self.assertEqual(3, diff(5, 2))

    def test_diff_with_string(self):
        """A numeric string argument should be coerced before subtracting."""
        self.assertEqual(3, diff(5, "2"))
class GetFilterTestCase(TestCase):
    """Tests for the generic "get" filter across container kinds."""

    def test_get_from_dict(self):
        """A dict key should return the stored value."""
        self.assertEqual("bar", get({"foo": "bar"}, "foo"))

    def test_get_from_list(self):
        """An integer index should work on lists."""
        self.assertEqual("bar", get(["foo", "bar"], 1))

    def test_get_from_tuple(self):
        """An integer index should work on tuples."""
        self.assertEqual("bar", get(("foo", "bar"), 1))

    def test_get_invalid_from_list_or_tuple(self):
        """A non-integer key on a sequence should yield an empty string."""
        self.assertEqual("", get(["foo", "bar"], "foo"))
        self.assertEqual("", get(("foo", "bar"), "foo"))

    def test_get_from_object(self):
        """Attributes should be read and callables called."""
        class Dummy:
            foo = "bar"

            def foo_func(self):
                return "bar_func"

        instance = Dummy()
        self.assertEqual("bar", get(instance, "foo"))
        self.assertEqual("bar_func", get(instance, "foo_func"))
class ModelNameFilterTestCase(TestCase):
    """Tests for model_name/model_name_plural and their raw_* variants."""
    def test_valid_model_name(self):
        """Tests returning of a valid model name using "model_name" filter.
        """
        # Asserted twice on purpose — presumably to also cover a repeated
        # (possibly cached) lookup; confirm against the filter implementation.
        self.assertEqual(model_name(get_user_model()), "user")
        self.assertEqual(model_name(get_user_model()), "user")
    def test_invalid_model_name(self):
        """Tests "model_name" filter on an invalid input.
        """
        # A plain class without Django model metadata must map to "".
        class FakeObject:
            pass
        self.assertEqual(model_name(FakeObject), "")
        self.assertEqual(model_name(FakeObject()), "")
    def test_plural_model_name(self):
        """Tests returning of a plural model name using "model_name" filter.
        """
        self.assertEqual(model_name_plural(get_user_model()), "users")
        self.assertEqual(model_name_plural(get_user_model()), "users")
    def test_invalid_plural_model_name(self):
        """Tests "model_name_plural" filter on an invalid input.
        """
        class FakeObject:
            pass
        self.assertEqual(model_name_plural(FakeObject), "")
        self.assertEqual(model_name_plural(FakeObject()), "")
    def test_proxy_model_name(self):
        """Tests proxy-model name must be returned instead of concrete one.
        """
        # The proxy defines its own verbose names; the filters must report
        # these rather than the concrete user model's names.
        class ProxyUser(get_user_model()):
            class Meta:
                proxy = True
                verbose_name = 'proxy user'
                verbose_name_plural = 'proxy users'
        self.assertEqual(model_name(ProxyUser), 'proxy user')
        self.assertEqual(model_name(ProxyUser()), 'proxy user')
        self.assertEqual(model_name_plural(ProxyUser), 'proxy users')
        self.assertEqual(model_name_plural(ProxyUser()), 'proxy users')
    def test_valid_raw_model_name(self):
        """Tests returning of a valid model name using "raw_model_name" filter.
        """
        self.assertEqual(raw_model_name(get_user_model()), "user")
        self.assertEqual(raw_model_name(get_user_model()), "user")
    def test_invalid_raw_model_name(self):
        """Tests "raw_model_name" filter on an invalid input.
        """
        class FakeObject:
            pass
        self.assertEqual(raw_model_name(FakeObject), "")
        self.assertEqual(raw_model_name(FakeObject()), "")
    def test_plural_raw_model_name(self):
        """Tests returning of a plural model name using "raw_model_name_plural" filter.
        """
        self.assertEqual(raw_model_name_plural(get_user_model()), "users")
        self.assertEqual(raw_model_name_plural(get_user_model()), "users")
    def test_invalid_plural_raw_model_name(self):
        """Tests "raw_model_name_plural" filter on an invalid input.
        """
        class FakeObject:
            pass
        self.assertEqual(raw_model_name_plural(FakeObject), "")
        self.assertEqual(raw_model_name_plural(FakeObject()), "")
class UserHasPermTagTestCase(TestCase):
    """Tests for the "user_has_perm" templatetag."""
    def test_user_has_perm(self):
        """Tests that "user_has_perm" check perms on both model and obj levels.
        """
        u7, n = get_user_model().objects.get_or_create(username="u7")
        u8, n = get_user_model().objects.get_or_create(username="u8")
        prev_user = logged_cache.user
        # Checking perms for u7 (saved in LoggedInUserCache).
        logged_cache.user = u7
        # Baseline: u7 holds no permissions on u8 at all.
        self.assertFalse(user_has_perm(u8, "%s.view_user" % auth_app))
        self.assertFalse(user_has_perm(u8, "%s.change_user" % auth_app))
        self.assertFalse(user_has_perm(u8, "%s.delete_user" % auth_app))
        # Object-level grant: view permission on this specific user (u8).
        op, n = ObjectPermission.objects.get_or_create_by_uid("%s.view_user.%s" % (auth_app, u8.pk))
        u7.objectpermissions.add(op)
        # Clear cached permission lookups before re-checking.
        clear_perm_caches(u7)
        self.assertTrue(user_has_perm(u8, "%s.view_user" % auth_app))
        self.assertFalse(user_has_perm(u8, "%s.change_user" % auth_app))
        self.assertFalse(user_has_perm(u8, "%s.delete_user" % auth_app))
        # Model-level grant: change permission on any user.
        p, n = Permission.objects.get_or_create_by_uid("%s.change_user" % auth_app)
        u7.user_permissions.add(p)
        clear_perm_caches(u7)
        self.assertTrue(user_has_perm(u8, "%s.view_user" % auth_app))
        self.assertTrue(user_has_perm(u8, "%s.change_user" % auth_app))
        self.assertFalse(user_has_perm(u8, "%s.delete_user" % auth_app))
        # Restores previous cached user.
        logged_cache.user = prev_user
class BreadcrumbsTagsTestCase(TestCase):
    """Tests for the breadcrumbs templatetags (add/remove/render)."""
    # NOTE(review): the class-level ``urls`` URLconf override is only
    # honored by older Django versions — verify against the project's
    # Django release.
    urls = 'djangoerp.core.tests.urls'
    def setUp(self):
        """Give every test a fake request carrying an empty breadcrumb list."""
        self.context = {"request": FakeRequest()}
        self.clear_breadcrumbs()
    def del_breadcrumbs(self):
        """Remove the breadcrumbs attribute entirely from the request."""
        delattr(self.context['request'], 'breadcrumbs')
    def clear_breadcrumbs(self):
        """Reset the request's breadcrumb list to empty."""
        self.context['request'].breadcrumbs = []
    def get_breadcrumbs(self):
        """Return the request's current breadcrumb list."""
        return self.context['request'].breadcrumbs
    def test_adding_breadcrumbs_var_to_context(self):
        """Tests adding by default "breadcrumbs" var to context if not present.
        """
        self.del_breadcrumbs()
        self.assertFalse(hasattr(self.context['request'], 'breadcrumbs'))
        add_crumb(self.context, "Go")
        self.assertTrue(hasattr(self.context['request'], 'breadcrumbs'))
        self.assertEqual(type(self.context['request'].breadcrumbs), list)
        self.clear_breadcrumbs()
    def test_fail_adding_empty_crumb(self):
        """Tests empty crumbs are not allowed.
        """
        self.clear_breadcrumbs()
        self.assertEqual(len(self.get_breadcrumbs()), 0)
        add_crumb(self.context, None)
        self.assertEqual(len(self.get_breadcrumbs()), 0)
    def test_add_crumb_with_empty_url(self):
        """Tests "add_crumb" templatetag with an empty URL.
        """
        self.clear_breadcrumbs()
        self.assertEqual(len(self.get_breadcrumbs()), 0)
        add_crumb(self.context, "Home")
        self.assertEqual(len(self.get_breadcrumbs()), 1)
        # A crumb is stored as a (title, url) tuple; url is None here.
        self.assertEqual(self.get_breadcrumbs()[0], ("Home", None))
    def test_add_crumb_with_valid_url(self):
        """Tests "add_crumb" templatetag with a valid URL.
        """
        self.clear_breadcrumbs()
        self.assertEqual(len(self.get_breadcrumbs()), 0)
        add_crumb(self.context, "Home", "/")
        self.assertEqual(len(self.get_breadcrumbs()), 1)
        self.assertEqual(self.get_breadcrumbs()[0], ("Home", "/"))
    def test_add_crumb_with_view_name(self):
        """Tests "add_crumb" templatetag with a view name instead of an URL.
        """
        self.clear_breadcrumbs()
        self.assertEqual(len(self.get_breadcrumbs()), 0)
        # "private_zone_url" resolves to "/private/" via the test URLconf.
        add_crumb(self.context, "Home", "private_zone_url")
        self.assertEqual(len(self.get_breadcrumbs()), 1)
        self.assertEqual(self.get_breadcrumbs()[0], ("Home", "/private/"))
    def test_remove_last_crumb_to_empty_list(self):
        """Tests "remove_last_crumb" templatetag from an empty breadcrumb list.
        """
        self.clear_breadcrumbs()
        self.assertEqual(len(self.get_breadcrumbs()), 0)
        remove_last_crumb(self.context)
        self.assertEqual(len(self.get_breadcrumbs()), 0)
    def test_remove_last_crumb(self):
        """Tests "remove_last_crumb" templatetag from a breadcrumb list.
        """
        self.clear_breadcrumbs()
        self.assertEqual(len(self.get_breadcrumbs()), 0)
        add_crumb(self.context, "Home", "/")
        add_crumb(self.context, "Private zone", "private_zone_url")
        self.assertEqual(len(self.get_breadcrumbs()), 2)
        remove_last_crumb(self.context)
        self.assertEqual(len(self.get_breadcrumbs()), 1)
        self.assertEqual(self.get_breadcrumbs()[0], ("Home", "/"))
    def test_render_invalid_breadcrumbs(self):
        """Tests "render_breadcrumbs" templatetag without registered breadcrumbs.
        """
        self.del_breadcrumbs()
        self.assertEqual(render_breadcrumbs(self.context), {"breadcrumbs": None})
        self.clear_breadcrumbs()
    def test_render_empty_breadcrumbs(self):
        """Tests "render_breadcrumbs" templatetag with an empty breadcrumb list.
        """
        self.clear_breadcrumbs()
        self.assertEqual(render_breadcrumbs(self.context), {"breadcrumbs": []})
    def test_render_breadcrumbs(self):
        """Tests "render_breadcrumbs" templatetag with a valid breadcrumb list.
        """
        self.clear_breadcrumbs()
        add_crumb(self.context, "Home", "/")
        add_crumb(self.context, "Private zone", "private_zone_url")
        self.assertEqual(
            render_breadcrumbs(self.context),
            {"breadcrumbs": [("Home", "/"), ("Private zone", "/private/")]}
        )
class AvatarTagTestCase(TestCase):
    """Tests for the "avatar" templatetag's gravatar markup."""

    def test_empty_avatar(self):
        """Without an email, the anonymous ("mm") gravatar should be used."""
        expected = '<img class="image" width="32" height="32" src="http://www.gravatar.com/avatar/?s=32&r=g&d=mm" />'
        self.assertEqual(expected, avatar(None))

    def test_valid_avatar(self):
        """A valid email should be hashed into the gravatar URL."""
        expected = '<img class="image" width="32" height="32" src="http://www.gravatar.com/avatar/754331256868501f6cdcc08efab6dd1e?s=32&r=g&d=mm" />'
        self.assertEqual(expected, avatar("u@u.it"))

    def test_set_avatar_size(self):
        """An explicit size should drive both the img size and the "s" param."""
        expected = '<img class="image" width="80" height="80" src="http://www.gravatar.com/avatar/754331256868501f6cdcc08efab6dd1e?s=80&r=g&d=mm" />'
        self.assertEqual(expected, avatar("u@u.it", 80))

    def test_set_default_avatar(self):
        """A custom default image URL should be passed through as "d"."""
        expected = '<img class="image" width="32" height="32" src="http://www.gravatar.com/avatar/754331256868501f6cdcc08efab6dd1e?s=32&r=g&d=http://localhost:8000/my_default_image.jpg" />'
        self.assertEqual(
            expected,
            avatar("u@u.it", default="http://localhost:8000/my_default_image.jpg")
        )
|
|
"""The tests for Monoprice Media player platform."""
import unittest
from unittest import mock
import voluptuous as vol
from collections import defaultdict
from homeassistant.components.media_player import (
DOMAIN, SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, SUPPORT_SELECT_SOURCE)
from homeassistant.const import STATE_ON, STATE_OFF
import tests.common
from homeassistant.components.media_player.monoprice import (
DATA_MONOPRICE, PLATFORM_SCHEMA, SERVICE_SNAPSHOT,
SERVICE_RESTORE, setup_platform)
class AttrDict(dict):
    """Dictionary that also exposes its items as attributes.

    Used to mock pymonoprice zone-status objects, which are read both
    attribute-style (``status.power``) and mapping-style.
    """

    def __getattr__(self, item):
        """Look attribute reads up as dictionary items."""
        return self[item]

    def __setattr__(self, name, value):
        """Store attribute writes as dictionary items."""
        self[name] = value
class MockMonoprice(object):
    """In-memory stand-in for the pymonoprice controller object."""

    def __init__(self):
        """Start every zone powered on, muted, at volume 0, on source 1."""
        self.zones = defaultdict(
            lambda: AttrDict(power=True, volume=0, mute=True, source=1))

    def zone_status(self, zone_id):
        """Return a copy of the zone's state, tagged with its zone id."""
        state = self.zones[zone_id]
        state.zone = zone_id
        return AttrDict(state)

    def set_source(self, zone_id, source_idx):
        """Select the input source for a zone."""
        self.zones[zone_id].source = source_idx

    def set_power(self, zone_id, power):
        """Switch a zone on or off."""
        self.zones[zone_id].power = power

    def set_mute(self, zone_id, mute):
        """Mute or unmute a zone."""
        self.zones[zone_id].mute = mute

    def set_volume(self, zone_id, volume):
        """Set the raw volume value for a zone."""
        self.zones[zone_id].volume = volume

    def restore_zone(self, zone):
        """Overwrite a zone's state from a previously saved snapshot."""
        self.zones[zone.zone] = AttrDict(zone)
class TestMonopriceSchema(unittest.TestCase):
    """Validate the monoprice PLATFORM_SCHEMA."""

    def test_valid_schema(self):
        """A fully populated, in-range config should validate."""
        # Valid zone ids: 11-16, 21-26 and 31-36.
        zones = {amp * 10 + zone: {'name': 'a'}
                 for amp in (1, 2, 3)
                 for zone in range(1, 7)}
        sources = {source: {'name': 'a'} for source in range(1, 7)}
        PLATFORM_SCHEMA({
            'platform': 'monoprice',
            'port': '/dev/ttyUSB0',
            'zones': zones,
            'sources': sources,
        })

    def test_invalid_schemas(self):
        """Every malformed config should raise MultipleInvalid."""
        bad_configs = (
            {},  # Empty
            None,  # None
            # Missing port
            {
                'platform': 'monoprice',
                'name': 'Name',
                'zones': {11: {'name': 'a'}},
                'sources': {1: {'name': 'b'}},
            },
            # Invalid zone number
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {10: {'name': 'a'}},
                'sources': {1: {'name': 'b'}},
            },
            # Invalid source number
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {11: {'name': 'a'}},
                'sources': {0: {'name': 'b'}},
            },
            # Zone missing name
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {11: {}},
                'sources': {1: {'name': 'b'}},
            },
            # Source missing name
            {
                'platform': 'monoprice',
                'port': 'aaa',
                'name': 'Name',
                'zones': {11: {'name': 'a'}},
                'sources': {1: {}},
            },
        )
        for bad in bad_configs:
            with self.assertRaises(vol.MultipleInvalid):
                PLATFORM_SCHEMA(bad)
class TestMonopriceMediaPlayer(unittest.TestCase):
    """Test the media_player module.

    Fixes applied in review:
    - The float tolerance ``0.0001`` used to be passed as the third
      positional argument of ``assertEqual`` — which is ``msg``, so the
      tolerance was silently ignored. Replaced with
      ``assertAlmostEqual(..., delta=0.0001)``.
    - ``assertTrue(isinstance(...))`` replaced with ``assertIsInstance``
      for clearer failure messages.
    - Removed the unused ``*args`` from ``test_setup_platform`` —
      unittest never passes extra arguments.
    """

    def setUp(self):
        """Set up the test case."""
        self.monoprice = MockMonoprice()
        self.hass = tests.common.get_test_home_assistant()
        self.hass.start()
        # Note, source dictionary is unsorted!
        with mock.patch('pymonoprice.get_monoprice',
                        new=lambda *a: self.monoprice):
            setup_platform(self.hass, {
                'platform': 'monoprice',
                'port': '/dev/ttyS0',
                'name': 'Name',
                'zones': {12: {'name': 'Zone name'}},
                'sources': {1: {'name': 'one'},
                            3: {'name': 'three'},
                            2: {'name': 'two'}},
            }, lambda *args, **kwargs: None, {})
            self.hass.block_till_done()
        self.media_player = self.hass.data[DATA_MONOPRICE][0]
        self.media_player.hass = self.hass
        self.media_player.entity_id = 'media_player.zone_1'

    def tearDown(self):
        """Tear down the test case."""
        self.hass.stop()

    def test_setup_platform(self):
        """Test setting up platform."""
        # Two services must be registered
        self.assertTrue(self.hass.services.has_service(DOMAIN,
                                                       SERVICE_RESTORE))
        self.assertTrue(self.hass.services.has_service(DOMAIN,
                                                       SERVICE_SNAPSHOT))
        self.assertEqual(len(self.hass.data[DATA_MONOPRICE]), 1)
        self.assertEqual(self.hass.data[DATA_MONOPRICE][0].name, 'Zone name')

    def test_service_calls_with_entity_id(self):
        """Test snapshot save/restore service calls."""
        self.media_player.update()
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_ON, self.media_player.state)
        self.assertAlmostEqual(0.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertTrue(self.media_player.is_volume_muted)
        self.assertEqual('one', self.media_player.source)
        # Saving default values
        self.hass.services.call(DOMAIN, SERVICE_SNAPSHOT,
                                {'entity_id': 'media_player.zone_1'},
                                blocking=True)
        # Changing media player to new state
        self.media_player.set_volume_level(1)
        self.media_player.select_source('two')
        self.media_player.mute_volume(False)
        self.media_player.turn_off()
        # Checking that values were indeed changed
        self.media_player.update()
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_OFF, self.media_player.state)
        self.assertAlmostEqual(1.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertFalse(self.media_player.is_volume_muted)
        self.assertEqual('two', self.media_player.source)
        # Restoring wrong media player to its previous state
        # Nothing should be done
        self.hass.services.call(DOMAIN, SERVICE_RESTORE,
                                {'entity_id': 'not_existing'},
                                blocking=True)
        # Checking that values were not (!) restored
        self.media_player.update()
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_OFF, self.media_player.state)
        self.assertAlmostEqual(1.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertFalse(self.media_player.is_volume_muted)
        self.assertEqual('two', self.media_player.source)
        # Restoring media player to its previous state
        self.hass.services.call(DOMAIN, SERVICE_RESTORE,
                                {'entity_id': 'media_player.zone_1'},
                                blocking=True)
        self.hass.block_till_done()
        # Checking that values were restored
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_ON, self.media_player.state)
        self.assertAlmostEqual(0.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertTrue(self.media_player.is_volume_muted)
        self.assertEqual('one', self.media_player.source)

    def test_service_calls_without_entity_id(self):
        """Test snapshot save/restore service calls."""
        self.media_player.update()
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_ON, self.media_player.state)
        self.assertAlmostEqual(0.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertTrue(self.media_player.is_volume_muted)
        self.assertEqual('one', self.media_player.source)
        # Restoring media player
        # since there is no snapshot, nothing should be done
        self.hass.services.call(DOMAIN, SERVICE_RESTORE, blocking=True)
        self.hass.block_till_done()
        self.media_player.update()
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_ON, self.media_player.state)
        self.assertAlmostEqual(0.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertTrue(self.media_player.is_volume_muted)
        self.assertEqual('one', self.media_player.source)
        # Saving default values
        self.hass.services.call(DOMAIN, SERVICE_SNAPSHOT, blocking=True)
        self.hass.block_till_done()
        # Changing media player to new state
        self.media_player.set_volume_level(1)
        self.media_player.select_source('two')
        self.media_player.mute_volume(False)
        self.media_player.turn_off()
        # Checking that values were indeed changed
        self.media_player.update()
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_OFF, self.media_player.state)
        self.assertAlmostEqual(1.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertFalse(self.media_player.is_volume_muted)
        self.assertEqual('two', self.media_player.source)
        # Restoring media player to its previous state
        self.hass.services.call(DOMAIN, SERVICE_RESTORE, blocking=True)
        self.hass.block_till_done()
        # Checking that values were restored
        self.assertEqual('Zone name', self.media_player.name)
        self.assertEqual(STATE_ON, self.media_player.state)
        self.assertAlmostEqual(0.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertTrue(self.media_player.is_volume_muted)
        self.assertEqual('one', self.media_player.source)

    def test_update(self):
        """Test updating values from monoprice."""
        self.assertIsNone(self.media_player.state)
        self.assertIsNone(self.media_player.volume_level)
        self.assertIsNone(self.media_player.is_volume_muted)
        self.assertIsNone(self.media_player.source)
        self.media_player.update()
        self.assertEqual(STATE_ON, self.media_player.state)
        self.assertAlmostEqual(0.0, self.media_player.volume_level,
                               delta=0.0001)
        self.assertTrue(self.media_player.is_volume_muted)
        self.assertEqual('one', self.media_player.source)

    def test_name(self):
        """Test name property."""
        self.assertEqual('Zone name', self.media_player.name)

    def test_state(self):
        """Test state property."""
        self.assertIsNone(self.media_player.state)
        self.media_player.update()
        self.assertEqual(STATE_ON, self.media_player.state)
        self.monoprice.zones[12].power = False
        self.media_player.update()
        self.assertEqual(STATE_OFF, self.media_player.state)

    def test_volume_level(self):
        """Test volume level property."""
        self.assertIsNone(self.media_player.volume_level)
        self.media_player.update()
        self.assertAlmostEqual(0.0, self.media_player.volume_level,
                               delta=0.0001)
        self.monoprice.zones[12].volume = 38
        self.media_player.update()
        self.assertAlmostEqual(1.0, self.media_player.volume_level,
                               delta=0.0001)
        self.monoprice.zones[12].volume = 19
        self.media_player.update()
        self.assertAlmostEqual(.5, self.media_player.volume_level,
                               delta=0.0001)

    def test_is_volume_muted(self):
        """Test volume muted property."""
        self.assertIsNone(self.media_player.is_volume_muted)
        self.media_player.update()
        self.assertTrue(self.media_player.is_volume_muted)
        self.monoprice.zones[12].mute = False
        self.media_player.update()
        self.assertFalse(self.media_player.is_volume_muted)

    def test_supported_features(self):
        """Test supported features property."""
        self.assertEqual(SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET |
                         SUPPORT_VOLUME_STEP | SUPPORT_TURN_ON |
                         SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE,
                         self.media_player.supported_features)

    def test_source(self):
        """Test source property."""
        self.assertIsNone(self.media_player.source)
        self.media_player.update()
        self.assertEqual('one', self.media_player.source)

    def test_media_title(self):
        """Test media title property."""
        self.assertIsNone(self.media_player.media_title)
        self.media_player.update()
        self.assertEqual('one', self.media_player.media_title)

    def test_source_list(self):
        """Test source list property."""
        # Note, the list is sorted!
        self.assertEqual(['one', 'two', 'three'],
                         self.media_player.source_list)

    def test_select_source(self):
        """Test source selection methods."""
        self.media_player.update()
        self.assertEqual('one', self.media_player.source)
        self.media_player.select_source('two')
        self.assertEqual(2, self.monoprice.zones[12].source)
        self.media_player.update()
        self.assertEqual('two', self.media_player.source)
        # Trying to set unknown source
        self.media_player.select_source('no name')
        self.assertEqual(2, self.monoprice.zones[12].source)
        self.media_player.update()
        self.assertEqual('two', self.media_player.source)

    def test_turn_on(self):
        """Test turning on the zone."""
        self.monoprice.zones[12].power = False
        self.media_player.update()
        self.assertEqual(STATE_OFF, self.media_player.state)
        self.media_player.turn_on()
        self.assertTrue(self.monoprice.zones[12].power)
        self.media_player.update()
        self.assertEqual(STATE_ON, self.media_player.state)

    def test_turn_off(self):
        """Test turning off the zone."""
        self.monoprice.zones[12].power = True
        self.media_player.update()
        self.assertEqual(STATE_ON, self.media_player.state)
        self.media_player.turn_off()
        self.assertFalse(self.monoprice.zones[12].power)
        self.media_player.update()
        self.assertEqual(STATE_OFF, self.media_player.state)

    def test_mute_volume(self):
        """Test mute functionality."""
        self.monoprice.zones[12].mute = True
        self.media_player.update()
        self.assertTrue(self.media_player.is_volume_muted)
        self.media_player.mute_volume(False)
        self.assertFalse(self.monoprice.zones[12].mute)
        self.media_player.update()
        self.assertFalse(self.media_player.is_volume_muted)
        self.media_player.mute_volume(True)
        self.assertTrue(self.monoprice.zones[12].mute)
        self.media_player.update()
        self.assertTrue(self.media_player.is_volume_muted)

    def test_set_volume_level(self):
        """Test set volume level."""
        self.media_player.set_volume_level(1.0)
        self.assertEqual(38, self.monoprice.zones[12].volume)
        self.assertIsInstance(self.monoprice.zones[12].volume, int)
        self.media_player.set_volume_level(0.0)
        self.assertEqual(0, self.monoprice.zones[12].volume)
        self.assertIsInstance(self.monoprice.zones[12].volume, int)
        self.media_player.set_volume_level(0.5)
        self.assertEqual(19, self.monoprice.zones[12].volume)
        self.assertIsInstance(self.monoprice.zones[12].volume, int)

    def test_volume_up(self):
        """Test increasing volume by one."""
        self.monoprice.zones[12].volume = 37
        self.media_player.update()
        self.media_player.volume_up()
        self.assertEqual(38, self.monoprice.zones[12].volume)
        self.assertIsInstance(self.monoprice.zones[12].volume, int)
        # Try to raise value beyond max
        self.media_player.update()
        self.media_player.volume_up()
        self.assertEqual(38, self.monoprice.zones[12].volume)
        self.assertIsInstance(self.monoprice.zones[12].volume, int)

    def test_volume_down(self):
        """Test decreasing volume by one."""
        self.monoprice.zones[12].volume = 1
        self.media_player.update()
        self.media_player.volume_down()
        self.assertEqual(0, self.monoprice.zones[12].volume)
        self.assertIsInstance(self.monoprice.zones[12].volume, int)
        # Try to lower value beyond minimum
        self.media_player.update()
        self.media_player.volume_down()
        self.assertEqual(0, self.monoprice.zones[12].volume)
        self.assertIsInstance(self.monoprice.zones[12].volume, int)
|
|
import logging
from xml.etree import ElementTree as etree
from xml.parsers import expat
try:
from oslo_serialization import jsonutils
except ImportError:
from oslo.serialization import jsonutils
from builtins import int
import six
from . import constants
from . import exceptions as exception
from ..i18n import _
LOG = logging.getLogger(__name__)
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class DictSerializer(ActionDispatcher):
    """Base request-body serializer; unhandled actions serialize to ''."""

    def serialize(self, data, action='default'):
        """Serialize *data* with the handler registered for *action*."""
        return self.dispatch(data, action=action)

    def default(self, data):
        # Fallback: no serialization, empty payload.
        return ""
class JSONDictSerializer(DictSerializer):
    """Serialize dictionaries to JSON request bodies."""

    def default(self, data):
        # For objects the JSON encoder cannot handle natively, fall back
        # to their text representation (same effect as the old inline
        # sanitizer closure).
        return jsonutils.dumps(data, default=six.text_type)
class XMLDictSerializer(DictSerializer):
    """Serialize dictionaries to XML, guided by a metadata mapping."""

    def __init__(self, metadata=None, xmlns=None):
        """XMLDictSerializer constructor.

        :param metadata: information needed to deserialize XML into
                         a dictionary.
        :param xmlns: XML namespace to include with serialized XML
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        # Namespace precedence: explicit argument, then metadata, then the
        # v2.0 default from constants.
        if not xmlns:
            xmlns = self.metadata.get('xmlns')
        if not xmlns:
            xmlns = constants.XML_NS_V20
        self.xmlns = xmlns

    def default(self, data):
        """Default serializer of XMLDictSerializer.

        :param data: expect data to contain a single key as XML root, or
                     contain another '*_links' key as atom links. Other
                     case will use 'VIRTUAL_ROOT_KEY' as XML root.
        :returns: the serialized XML string, or '' on AttributeError.
        """
        try:
            links = None
            has_atom = False
            if data is None:
                root_key = constants.VIRTUAL_ROOT_KEY
                root_value = None
            else:
                # Atom links travel under a single '*_links' key; pop them
                # out so they can be serialized as <atom:link> children.
                link_keys = [k for k in six.iterkeys(data) or []
                             if k.endswith('_links')]
                if link_keys:
                    links = data.pop(link_keys[0], None)
                    has_atom = True
                # A single remaining key becomes the XML root; otherwise the
                # whole dict is wrapped under VIRTUAL_ROOT_KEY.
                root_key = (len(data) == 1 and
                            list(data.keys())[0] or constants.VIRTUAL_ROOT_KEY)
                root_value = data.get(root_key, data)
            # Build under a throwaway element; the real root is its first
            # (only) child.
            doc = etree.Element("_temp_root")
            used_prefixes = []
            self._to_xml_node(doc, self.metadata, root_key,
                              root_value, used_prefixes)
            if links:
                self._create_link_nodes(list(doc)[0], links)
            return self.to_xml_string(list(doc)[0], used_prefixes, has_atom)
        except AttributeError as e:
            # Best-effort: malformed input yields an empty body, not a 500.
            LOG.exception(str(e))
            return ''

    def __call__(self, data):
        # Provides a migration path to a cleaner WSGI layer, this
        # "default" stuff and extreme extensibility isn't being used
        # like originally intended
        return self.default(data)

    def to_xml_string(self, node, used_prefixes, has_atom=False):
        """Attach namespace declarations and render *node* as UTF-8 bytes."""
        self._add_xmlns(node, used_prefixes, has_atom)
        return etree.tostring(node, encoding='UTF-8')

    # NOTE (ameade): the has_atom should be removed after all of the
    # XML serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, used_prefixes, has_atom=False):
        # Declare the default namespace plus the quantum/xsi helpers, and
        # only the extension namespaces actually used in this document.
        node.set('xmlns', self.xmlns)
        node.set(constants.TYPE_XMLNS, self.xmlns)
        if has_atom:
            node.set(constants.ATOM_XMLNS, constants.ATOM_NAMESPACE)
        node.set(constants.XSI_NIL_ATTR, constants.XSI_NAMESPACE)
        ext_ns = self.metadata.get(constants.EXT_NS, {})
        for prefix in used_prefixes:
            if prefix in ext_ns:
                node.set('xmlns:' + prefix, ext_ns[prefix])

    def _to_xml_node(self, parent, metadata, nodename, data, used_prefixes):
        """Recursive method to convert data members to XML nodes.

        Scalars, dicts, lists and None are each tagged with a type
        attribute so the deserializer can round-trip them.
        """
        result = etree.SubElement(parent, nodename)
        if ":" in nodename:
            # Prefixed node: remember the prefix so _add_xmlns can declare it.
            used_prefixes.append(nodename.split(":", 1)[0])
        # TODO(bcwaldon): accomplish this without a type-check
        if isinstance(data, list):
            if not data:
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_LIST)
                return result
            # Each list item is emitted under the singular form of the
            # plural node name (metadata override, strip 's', or 'item').
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                self._to_xml_node(result, metadata, singular, item,
                                  used_prefixes)
        # TODO(bcwaldon): accomplish this without a type-check
        elif isinstance(data, dict):
            if not data:
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_DICT)
                return result
            # Keys listed in the metadata 'attributes' map become XML
            # attributes; everything else becomes a child element.
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in sorted(data.items()):
                if k in attrs:
                    result.set(k, str(v))
                else:
                    self._to_xml_node(result, metadata, k, v,
                                      used_prefixes)
        elif data is None:
            # None is encoded as xsi:nil='true' with no text content.
            result.set(constants.XSI_ATTR, 'true')
        else:
            # Scalar leaf: tag the primitive type, then store its text form.
            if isinstance(data, bool):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_BOOL)
            elif isinstance(data, int):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_INT)
            elif isinstance(data, float):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_FLOAT)
            LOG.debug("Data %(data)s type is %(type)s",
                      {'data': data,
                       'type': type(data)})
            result.text = six.text_type(data)
        return result

    def _create_link_nodes(self, xml_doc, links):
        # Append one <atom:link rel=... href=...> element per link dict.
        for link in links:
            link_node = etree.SubElement(xml_doc, 'atom:link')
            link_node.set('rel', link['rel'])
            link_node.set('href', link['href'])
class TextDeserializer(ActionDispatcher):
    """Base request-body deserializer; unhandled formats yield {}."""

    def deserialize(self, datastring, action='default'):
        """Route *datastring* to the deserializer for *action*."""
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        # Fallback: nothing understood, empty result.
        return {}
class JSONDeserializer(TextDeserializer):
    """Deserialize JSON request bodies into dictionaries."""

    def _from_json(self, datastring):
        """Parse *datastring* as JSON.

        :raises MalformedResponseBody: when the payload is not valid JSON.
        """
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            raise exception.MalformedResponseBody(
                reason=_("Cannot understand JSON"))

    def default(self, datastring):
        return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
    """Deserialize XML request bodies produced by XMLDictSerializer."""

    def __init__(self, metadata=None):
        """XMLDeserializer constructor.

        :param metadata: information needed to deserialize XML into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}
        # Default to the v2.0 namespace when metadata does not supply one.
        xmlns = self.metadata.get('xmlns')
        if not xmlns:
            xmlns = constants.XML_NS_V20
        self.xmlns = xmlns

    def _get_key(self, tag):
        # Map a '{namespace}tag' qualified name back to a plain key:
        # the document namespace is stripped; known extension namespaces
        # become a 'prefix:tag' key; an unqualified tag is returned as-is.
        tags = tag.split("}", 1)
        if len(tags) == 2:
            ns = tags[0][1:]
            bare_tag = tags[1]
            ext_ns = self.metadata.get(constants.EXT_NS, {})
            if ns == self.xmlns:
                return bare_tag
            for prefix, _ns in ext_ns.items():
                if ns == _ns:
                    return prefix + ":" + bare_tag
            # NOTE(review): a qualified tag whose namespace matches neither
            # self.xmlns nor any extension falls through here and implicitly
            # returns None -- confirm callers tolerate a None key.
        else:
            return tag

    def _get_links(self, root_tag, node):
        # NOTE(review): the root_tag parameter is immediately shadowed by
        # the recomputed value below; the argument is effectively unused.
        link_nodes = node.findall(constants.ATOM_LINK_NOTATION)
        root_tag = self._get_key(node.tag)
        link_key = "%s_links" % root_tag
        link_list = []
        for link in link_nodes:
            link_list.append({'rel': link.get('rel'),
                              'href': link.get('href')})
            # Remove link node in order to avoid link node being
            # processed as an item in _from_xml_node
            node.remove(link)
        return link_list and {link_key: link_list} or {}

    def _from_xml(self, datastring):
        """Parse an XML document into {root_tag: value} plus any links.

        :raises MalformedResponseBody: when the payload is not valid XML.
        """
        if datastring is None:
            return None
        plurals = set(self.metadata.get('plurals', {}))
        try:
            node = etree.fromstring(datastring)
            root_tag = self._get_key(node.tag)
            # Extract atom links first; _get_links also removes them from
            # the tree so they are not treated as data.
            links = self._get_links(root_tag, node)
            result = self._from_xml_node(node, plurals)
            # There is no case where root_tag = constants.VIRTUAL_ROOT_KEY
            # and links is not None because of the way data are serialized
            if root_tag == constants.VIRTUAL_ROOT_KEY:
                return result
            return dict({root_tag: result}, **links)
        except Exception as e:
            # Translate only XML parse failures into MalformedResponseBody;
            # anything else propagates unchanged.
            parseError = False
            # Python2.7
            if (hasattr(etree, 'ParseError') and
                isinstance(e, getattr(etree, 'ParseError'))):
                parseError = True
            # Python2.6
            elif isinstance(e, expat.ExpatError):
                parseError = True
            if parseError:
                msg = _("Cannot understand XML")
                raise exception.MalformedResponseBody(reason=msg)
            else:
                raise

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param node: minidom node name
        :param listnames: list of XML node names whose subnodes should
                          be considered list items.
        """
        # xsi:nil / quantum:type attributes written by the serializer drive
        # the reconstruction of None, {}, [], bool, int and float values.
        attrNil = node.get(str(etree.QName(constants.XSI_NAMESPACE, "nil")))
        attrType = node.get(str(etree.QName(
            self.metadata.get('xmlns'), "type")))
        if attrNil and attrNil.lower() == 'true':
            return None
        elif not len(node) and not node.text:
            # Empty element: the type attribute distinguishes {} and []
            # from an empty string.
            if attrType and attrType == constants.TYPE_DICT:
                return {}
            elif attrType and attrType == constants.TYPE_LIST:
                return []
            else:
                return ''
        elif len(node) == 0 and node.text:
            # Scalar leaf: convert text back to its annotated type.
            converters = {constants.TYPE_BOOL:
                          lambda x: x.lower() == 'true',
                          constants.TYPE_INT:
                          lambda x: int(x),
                          constants.TYPE_FLOAT:
                          lambda x: float(x)}
            if attrType and attrType in converters:
                return converters[attrType](node.text)
            else:
                return node.text
        elif self._get_key(node.tag) in listnames:
            # Known plural: children are list items.
            return [self._from_xml_node(n, listnames) for n in node]
        else:
            # Generic element: attributes and children become dict entries,
            # skipping namespace declarations and the serializer's own
            # bookkeeping attributes.
            result = dict()
            for attr in node.keys():
                if (attr == 'xmlns' or
                    attr.startswith('xmlns:') or
                    attr == constants.XSI_ATTR or
                    attr == constants.TYPE_ATTR):
                    continue
                result[self._get_key(attr)] = node.get(attr)
            children = list(node)
            for child in children:
                result[self._get_key(child.tag)] = self._from_xml_node(
                    child, listnames)
            return result

    def default(self, datastring):
        return {'body': self._from_xml(datastring)}

    def __call__(self, datastring):
        # Adding a migration path to allow us to remove unnecessary classes
        return self.default(datastring)
# NOTE(maru): this class is duplicated from neutron.wsgi
class Serializer(object):
    """Serializes and deserializes dictionaries to certain MIME types."""

    def __init__(self, metadata=None, default_xmlns=None):
        """Create a serializer based on the given WSGI environment.

        'metadata' is an optional dict mapping MIME types to information
        needed to serialize a dictionary to that type.
        """
        self.metadata = metadata or {}
        self.default_xmlns = default_xmlns

    def _get_serialize_handler(self, content_type):
        """Return the serializer for *content_type*.

        :raises InvalidContentType: for unsupported MIME types.
        """
        handlers = {
            'application/json': JSONDictSerializer(),
            'application/xml': XMLDictSerializer(self.metadata),
        }
        try:
            return handlers[content_type]
        except KeyError:
            # Narrowed from 'except Exception': only a missing handler key
            # means an unsupported content type; any other error should
            # propagate instead of being masked as InvalidContentType.
            raise exception.InvalidContentType(content_type=content_type)

    def serialize(self, data, content_type):
        """Serialize a dictionary into the specified content type."""
        return self._get_serialize_handler(content_type).serialize(data)

    def deserialize(self, datastring, content_type):
        """Deserialize a string to a dictionary.

        The string must be in the format of a supported MIME type.
        """
        return self.get_deserialize_handler(content_type).deserialize(
            datastring)

    def get_deserialize_handler(self, content_type):
        """Return the deserializer for *content_type*.

        :raises InvalidContentType: for unsupported MIME types.
        """
        handlers = {
            'application/json': JSONDeserializer(),
            'application/xml': XMLDeserializer(self.metadata),
        }
        try:
            return handlers[content_type]
        except KeyError:
            # See _get_serialize_handler for why this catches only KeyError.
            raise exception.InvalidContentType(content_type=content_type)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from typing import Any, Callable, Optional
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from zerver.models import (
get_client, get_realm_by_string_id, get_stream, get_user_profile_by_email,
Message, Recipient, UserProfile
)
from zerver.lib.actions import (
apply_events,
bulk_remove_subscriptions,
do_add_alert_words,
check_add_realm_emoji,
do_add_realm_filter,
do_change_avatar_source,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_full_name,
do_change_is_admin,
do_change_stream_description,
do_change_subscription_property,
do_create_user,
do_deactivate_stream,
do_deactivate_user,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_realm_emoji,
do_remove_realm_filter,
do_rename_stream,
do_add_default_stream,
do_set_muted_topics,
do_set_realm_create_stream_by_admins_only,
do_set_realm_name,
do_set_realm_restricted_to_domain,
do_set_realm_invite_required,
do_set_realm_invite_by_admins_only,
do_set_realm_message_editing,
do_set_realm_default_language,
do_set_realm_authentication_methods,
do_update_message,
do_update_pointer,
do_change_twenty_four_hour_time,
do_change_left_side_userlist,
fetch_initial_state_data,
get_subscription
)
from zerver.lib.event_queue import allocate_client_descriptor
from zerver.lib.message import render_markdown
from zerver.lib.test_helpers import POSTRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.validator import (
check_bool, check_dict, check_int, check_list, check_string,
equals, check_none_or, Validator
)
from zerver.views.events_register import _default_all_public_streams, _default_narrow
from zerver.tornadoviews import get_events_backend
from collections import OrderedDict
import mock
import time
import ujson
from six.moves import range
class TornadoTest(ZulipTestCase):
    def test_tornado_endpoint(self):
        # type: () -> None
        # This test is mostly intended to get minimal coverage on
        # the /notify_tornado endpoint, so we can have 100% URL coverage,
        # but it does exercise a little bit of the codepath.
        event_payload = ujson.dumps(dict(
            event=dict(type='other'),
            users=[get_user_profile_by_email('hamlet@zulip.com').id],
        ))
        post_data = dict(data=event_payload)

        # Without the shared secret the internal endpoint refuses access.
        request = POSTRequestMock(post_data, user_profile=None)
        request.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', request)
        self.assert_json_error(result, 'Access denied', status_code=403)

        # Supplying SHARED_SECRET authorizes the request.
        post_data['secret'] = settings.SHARED_SECRET
        request = POSTRequestMock(post_data, user_profile=None)
        request.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', request)
        self.assert_json_success(result)
class GetEventsTest(ZulipTestCase):
    """Exercise the get_events tornado endpoint by calling the view directly."""

    def tornado_call(self, view_func, user_profile, post_data):
        # type: (Callable[[HttpRequest, UserProfile], HttpResponse], UserProfile, Dict[str, Any]) -> HttpResponse
        # Helper: wrap post_data in a mocked POST request and invoke the view.
        request = POSTRequestMock(post_data, user_profile)
        return view_func(request, user_profile)

    def test_get_events(self):
        # type: () -> None
        """Messages appear in both sender and recipient queues; only the
        sender's events carry local_message_id."""
        email = "hamlet@zulip.com"
        recipient_email = "othello@zulip.com"
        user_profile = get_user_profile_by_email(email)
        recipient_user_profile = get_user_profile_by_email(recipient_email)
        self.login(email)
        # Register an event queue for the sender...
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]
        # ...and one for the recipient.
        recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
                                             {"apply_markdown": ujson.dumps(True),
                                              "event_types": ujson.dumps(["message"]),
                                              "user_client": "website",
                                              "dont_block": ujson.dumps(True),
                                              })
        self.assert_json_success(recipient_result)
        recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]
        # A freshly registered queue starts out empty.
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)
        local_id = 10.01
        self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
        # The sender's queue now holds the message, echoing local_id back.
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
        self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)
        last_event_id = events[0]["id"]
        local_id += 0.01
        self.send_message(email, recipient_email, Recipient.PERSONAL, "hello", local_id=local_id, sender_queue_id=queue_id)
        # Polling from last_event_id returns only the newer message.
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": last_event_id,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        # Test that the received message in the receiver's event queue
        # exists and does not contain a local id
        recipient_result = self.tornado_call(get_events_backend, recipient_user_profile,
                                             {"queue_id": recipient_queue_id,
                                              "user_client": "website",
                                              "last_event_id": -1,
                                              "dont_block": ujson.dumps(True),
                                              })
        recipient_events = ujson.loads(recipient_result.content)["events"]
        self.assert_json_success(recipient_result)
        self.assertEqual(len(recipient_events), 2)
        self.assertEqual(recipient_events[0]["type"], "message")
        self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[0])
        self.assertEqual(recipient_events[1]["type"], "message")
        self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[1])

    def test_get_events_narrow(self):
        # type: () -> None
        """A stream narrow filters events down to matching messages."""
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email(email)
        self.login(email)
        # Register a queue narrowed to the "denmark" stream.
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "narrow": ujson.dumps([["stream", "denmark"]]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)
        # Send one PM (filtered out) and one stream message (kept).
        self.send_message(email, "othello@zulip.com", Recipient.PERSONAL, "hello")
        self.send_message(email, "Denmark", Recipient.STREAM, "hello")
        result = self.tornado_call(get_events_backend, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["display_recipient"], "Denmark")
class EventsRegisterTest(ZulipTestCase):
    # Shared fixtures resolved once at class-definition time; these queries
    # run at import, so the test database must already be populated.
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    bot = get_user_profile_by_email("welcome-bot@zulip.com")
    maxDiff = None  # type: Optional[int]
    def create_bot(self, email):
        # type: (str) -> UserProfile
        """Create a default bot in realm 'zulip' owned by self.user_profile."""
        return do_create_user(email, '123',
                              get_realm_by_string_id('zulip'), 'Test Bot', 'test',
                              bot_type=UserProfile.DEFAULT_BOT, bot_owner=self.user_profile)
def realm_bot_schema(self, field_name, check):
# type: (str, Validator) -> Validator
return check_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict([
('email', check_string),
('user_id', check_int),
(field_name, check),
])),
])
    def do_test(self, action, event_types=None):
        # type: (Callable[[], Any], Optional[List[str]]) -> List[Dict[str, Any]]
        """Run *action* and verify apply_events keeps state consistent.

        Registers an event queue, snapshots the initial state, performs the
        action, applies the captured events to the snapshot, and checks the
        result matches a fresh state fetch. Returns the captured events.
        """
        client = allocate_client_descriptor(
            dict(user_profile_id = self.user_profile.id,
                 user_profile_email = self.user_profile.email,
                 realm_id = self.user_profile.realm.id,
                 event_types = event_types,
                 client_type_name = "website",
                 apply_markdown = True,
                 all_public_streams = False,
                 queue_timeout = 600,
                 last_connection_time = time.time(),
                 narrow = [])
        )
        # hybrid_state = initial fetch state + re-applying events triggered by our action
        # normal_state = do action then fetch at the end (the "normal" code path)
        hybrid_state = fetch_initial_state_data(self.user_profile, event_types, "")
        action()
        events = client.event_queue.contents()
        # The action must have produced at least one event to be meaningful.
        self.assertTrue(len(events) > 0)
        apply_events(hybrid_state, events, self.user_profile)
        normal_state = fetch_initial_state_data(self.user_profile, event_types, "")
        self.match_states(hybrid_state, normal_state)
        return events
def assert_on_error(self, error):
# type: (str) -> None
if error:
raise AssertionError(error)
    def match_states(self, state1, state2):
        # type: (Dict[str, Any], Dict[str, Any]) -> None
        """Assert two fetch_initial_state_data results are equivalent.

        Both states are normalized IN PLACE: list-of-dict fields are
        re-keyed by their natural key (email / stream name) so ordering
        differences do not cause spurious mismatches.
        """
        def normalize(state):
            # type: (Dict[str, Any]) -> None
            state['realm_users'] = {u['email']: u for u in state['realm_users']}
            # Subscriber lists are order-insensitive; sort before comparing.
            for u in state['subscriptions']:
                u['subscribers'].sort()
            state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
            state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
            if 'realm_bots' in state:
                state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
        normalize(state1)
        normalize(state2)
        self.assertEqual(state1, state2)
    def test_send_message_events(self):
        # type: () -> None
        """Validate the schemas of 'message' and 'update_message' events."""
        schema_checker = check_dict([
            ('type', equals('message')),
            ('flags', check_list(None)),
            ('message', check_dict([
                ('avatar_url', check_string),
                ('client', check_string),
                ('content', check_string),
                ('content_type', equals('text/html')),
                ('display_recipient', check_string),
                ('gravatar_hash', check_string),
                ('id', check_int),
                ('recipient_id', check_int),
                ('sender_domain', check_string),
                ('sender_email', check_string),
                ('sender_full_name', check_string),
                ('sender_id', check_int),
                ('sender_short_name', check_string),
                ('subject', check_string),
                ('subject_links', check_list(None)),
                ('timestamp', check_int),
                ('type', check_string),
            ])),
        ])
        events = self.do_test(lambda: self.send_message("hamlet@zulip.com", "Verona", Recipient.STREAM, "hello"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Now edit the message just sent and validate the update event.
        schema_checker = check_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('edit_timestamp', check_int),
            ('flags', check_list(None)),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('orig_content', check_string),
            ('orig_rendered_content', check_string),
            ('orig_subject', check_string),
            ('propagate_mode', check_string),
            ('rendered_content', check_string),
            ('sender', check_string),
            ('stream_id', check_int),
            ('subject', check_string),
            ('subject_links', check_list(None)),
            # There is also a timestamp field in the event, but we ignore it, as
            # it's kind of an unwanted but harmless side effect of calling log_event.
        ])
        # Edit the most recently sent message (the one from above).
        message = Message.objects.order_by('-id')[0]
        topic = 'new_topic'
        propagate_mode = 'change_all'
        content = 'new content'
        rendered_content = render_markdown(message, content)
        events = self.do_test(lambda: do_update_message(self.user_profile, message, topic, propagate_mode, content, rendered_content))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_pointer_events(self):
# type: () -> None
schema_checker = check_dict([
('type', equals('pointer')),
('pointer', check_int)
])
events = self.do_test(lambda: do_update_pointer(self.user_profile, 1500))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_register_events(self):
        # type: () -> None
        """Registering a user emits realm_user/add then stream/create."""
        realm_user_add_checker = check_dict([
            ('type', equals('realm_user')),
            ('op', equals('add')),
            ('person', check_dict([
                ('email', check_string),
                ('full_name', check_string),
                ('is_admin', check_bool),
                ('is_bot', check_bool),
            ])),
        ])
        stream_create_checker = check_dict([
            ('type', equals('stream')),
            ('op', equals('create')),
            ('streams', check_list(check_dict([
                ('description', check_string),
                ('invite_only', check_bool),
                ('name', check_string),
                ('stream_id', check_int),
            ])))
        ])
        events = self.do_test(lambda: self.register("test1", "test1"))
        # The event order is fixed: first the new user, then their stream.
        error = realm_user_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        error = stream_create_checker('events[1]', events[1])
        self.assert_on_error(error)
def test_alert_words_events(self):
# type: () -> None
alert_words_checker = check_dict([
('type', equals('alert_words')),
('alert_words', check_list(check_string)),
])
events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
def test_default_streams_events(self):
# type: () -> None
default_streams_checker = check_dict([
('type', equals('default_streams')),
('default_streams', check_list(check_dict([
('description', check_string),
('invite_only', check_bool),
('name', check_string),
('stream_id', check_int),
]))),
])
events = self.do_test(lambda: do_add_default_stream(self.user_profile.realm, "Scotland"))
error = default_streams_checker('events[0]', events[0])
self.assert_on_error(error)
def test_muted_topics_events(self):
# type: () -> None
muted_topics_checker = check_dict([
('type', equals('muted_topics')),
('muted_topics', check_list(check_list(check_string, 2))),
])
events = self.do_test(lambda: do_set_muted_topics(self.user_profile, [[u"Denmark", u"topic"]]))
error = muted_topics_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_full_name(self):
# type: () -> None
schema_checker = check_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict([
('email', check_string),
('full_name', check_string),
])),
])
events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_name(self):
# type: () -> None
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('name')),
('value', check_string),
])
events = self.do_test(lambda: do_set_realm_name(self.user_profile.realm, 'New Realm Name'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_change_realm_restricted_to_domain(self):
        # type: () -> None
        """Toggling restricted_to_domain emits realm/update events."""
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('restricted_to_domain')),
            ('value', check_bool),
        ])
        # The first True is probably a noop, then we get transitions in both directions.
        for restricted_to_domain in (True, False, True):
            events = self.do_test(lambda: do_set_realm_restricted_to_domain(self.user_profile.realm, restricted_to_domain))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_invite_required(self):
        # type: () -> None
        """Toggling invite_required emits realm/update events."""
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('invite_required')),
            ('value', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for invite_required in (False, True, False):
            events = self.do_test(lambda: do_set_realm_invite_required(self.user_profile.realm, invite_required))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_authentication_methods(self):
        # type: () -> None
        """Changing auth methods emits realm/update_dict events."""
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict([])),
        ])
        # Test transitions; any new backends should be tested with T/T/T/F/T
        for (auth_method_dict) in \
                ({'Google': True, 'Email': True, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': True, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': False },
                 {'Google': False, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': True, 'GitHub': True, 'LDAP': True, 'Dev': False}):
            events = self.do_test(lambda: do_set_realm_authentication_methods(self.user_profile.realm,
                                                                              auth_method_dict))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_invite_by_admins_only(self):
        # type: () -> None
        """Toggling invite_by_admins_only emits realm/update events."""
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('invite_by_admins_only')),
            ('value', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for invite_by_admins_only in (False, True, False):
            events = self.do_test(lambda: do_set_realm_invite_by_admins_only(self.user_profile.realm, invite_by_admins_only))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
def test_change_realm_default_language(self):
# type: () -> None
schema_checker = check_dict([
('type', equals('realm')),
('op', equals('update')),
('property', equals('default_language')),
('value', check_string),
])
events = self.do_test(lambda: do_set_realm_default_language(self.user_profile.realm, 'de'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_change_realm_create_stream_by_admins_only(self):
        # type: () -> None
        """Toggling create_stream_by_admins_only emits realm/update events."""
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('create_stream_by_admins_only')),
            ('value', check_bool),
        ])
        # The first False is probably a noop, then we get transitions in both directions.
        for create_stream_by_admins_only in (False, True, False):
            events = self.do_test(lambda: do_set_realm_create_stream_by_admins_only(self.user_profile.realm,
                                                                                    create_stream_by_admins_only))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_pin_stream(self):
        # type: () -> None
        """Toggling pin_to_top on a subscription emits update events."""
        schema_checker = check_dict([
            ('type', equals('subscription')),
            ('op', equals('update')),
            ('property', equals('pin_to_top')),
            ('value', check_bool),
        ])
        stream = "Denmark"
        sub = get_subscription(stream, self.user_profile)
        # The first False is probably a noop, then we get transitions in both directions.
        for pinned in (False, True, False):
            events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", pinned))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_message_edit_settings(self):
        # type: () -> None
        """Changing message-editing settings emits realm/update_dict events."""
        schema_checker = check_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict([('allow_message_editing', check_bool),
                                 ('message_content_edit_limit_seconds', check_int)])),
        ])
        # Test every transition among the four possibilities {T,F} x {0, non-0}
        for (allow_message_editing, message_content_edit_limit_seconds) in \
                ((True, 0), (False, 0), (True, 0), (False, 1234), (True, 0), (True, 1234), (True, 0),
                 (False, 0), (False, 1234), (False, 0), (True, 1234), (False, 0),
                 (True, 1234), (True, 600), (False, 600), (False, 1234), (True, 600)):
            events = self.do_test(lambda: do_set_realm_message_editing(self.user_profile.realm,
                                                                      allow_message_editing, message_content_edit_limit_seconds))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
def test_change_is_admin(self):
    # type: () -> None
    """Toggling admin status emits a realm_user/update event."""
    schema_checker = check_dict([
        ('type', equals('realm_user')),
        ('op', equals('update')),
        ('person', check_dict([
            ('email', check_string),
            ('is_admin', check_bool),
        ])),
    ])
    # The first False is probably a noop, then we get transitions in both directions.
    for admin in (False, True, False):
        events = self.do_test(lambda: do_change_is_admin(self.user_profile, admin))
        self.assert_on_error(schema_checker('events[0]', events[0]))
def test_change_twenty_four_hour_time(self):
    # type: () -> None
    """Toggling twenty_four_hour_time emits an update_display_settings event."""
    checker = check_dict([
        ('type', equals('update_display_settings')),
        ('setting_name', equals('twenty_four_hour_time')),
        ('user', check_string),
        ('setting', check_bool),
    ])
    # The first False is probably a noop, then we get transitions in both directions.
    for value in [False, True, False]:
        events = self.do_test(
            lambda: do_change_twenty_four_hour_time(self.user_profile, value))
        self.assert_on_error(checker('events[0]', events[0]))
def test_change_left_side_userlist(self):
    # type: () -> None
    """Toggling left_side_userlist emits an update_display_settings event."""
    checker = check_dict([
        ('type', equals('update_display_settings')),
        ('setting_name', equals('left_side_userlist')),
        ('user', check_string),
        ('setting', check_bool),
    ])
    # The first False is probably a noop, then we get transitions in both directions.
    for value in [False, True, False]:
        events = self.do_test(
            lambda: do_change_left_side_userlist(self.user_profile, value))
        self.assert_on_error(checker('events[0]', events[0]))
def test_realm_emoji_events(self):
    # type: () -> None
    """Adding and removing a realm emoji each emit a realm_emoji/update event."""
    schema_checker = check_dict([
        ('type', equals('realm_emoji')),
        ('op', equals('update')),
        ('realm_emoji', check_dict([])),
    ])
    add_action = lambda: check_add_realm_emoji(
        get_realm_by_string_id("zulip"), "my_emoji",
        "https://realm.com/my_emoji")
    remove_action = lambda: do_remove_realm_emoji(
        get_realm_by_string_id("zulip"), "my_emoji")
    for action in (add_action, remove_action):
        events = self.do_test(action)
        self.assert_on_error(schema_checker('events[0]', events[0]))
def test_realm_filter_events(self):
    # type: () -> None
    """Adding and removing a realm filter each emit a realm_filters event."""
    schema_checker = check_dict([
        ('type', equals('realm_filters')),
        ('realm_filters', check_list(None)),  # TODO: validate tuples in the list
    ])
    events = self.do_test(lambda: do_add_realm_filter(get_realm_by_string_id("zulip"), "#(?P<id>[123])",
                                                      "https://realm.com/my_realm_filter/%(id)s"))
    error = schema_checker('events[0]', events[0])
    self.assert_on_error(error)

    # BUG FIX: the removal's events were previously discarded (the do_test
    # result was not assigned), so the second check silently re-validated
    # the stale events from the add call above.
    events = self.do_test(lambda: do_remove_realm_filter(get_realm_by_string_id("zulip"), "#(?P<id>[123])"))
    error = schema_checker('events[0]', events[0])
    self.assert_on_error(error)
def test_create_bot(self):
    # type: () -> None
    """Creating a bot emits a realm_bot/add event carrying the full bot payload."""
    bot_created_checker = check_dict([
        ('type', equals('realm_bot')),
        ('op', equals('add')),
        ('bot', check_dict([
            ('email', check_string),
            ('user_id', check_int),
            ('full_name', check_string),
            ('api_key', check_string),
            ('default_sending_stream', check_none_or(check_string)),
            ('default_events_register_stream', check_none_or(check_string)),
            ('default_all_public_streams', check_bool),
            ('avatar_url', check_string),
        ])),
    ])
    events = self.do_test(lambda: self.create_bot('test-bot@zulip.com'))
    self.assert_on_error(bot_created_checker('events[1]', events[1]))
def test_change_bot_full_name(self):
    # type: () -> None
    """Renaming a bot emits a realm_bot update for full_name."""
    events = self.do_test(lambda: do_change_full_name(self.bot, 'New Bot Name'))
    checker = self.realm_bot_schema('full_name', check_string)
    self.assert_on_error(checker('events[1]', events[1]))
def test_regenerate_bot_api_key(self):
    # type: () -> None
    """Regenerating a bot's API key emits a realm_bot update for api_key."""
    events = self.do_test(lambda: do_regenerate_api_key(self.bot))
    checker = self.realm_bot_schema('api_key', check_string)
    self.assert_on_error(checker('events[0]', events[0]))
def test_change_bot_avatar_source(self):
    # type: () -> None
    """Changing a bot's avatar source emits a realm_bot update for avatar_url."""
    events = self.do_test(
        lambda: do_change_avatar_source(self.bot, self.bot.AVATAR_FROM_USER))
    checker = self.realm_bot_schema('avatar_url', check_string)
    self.assert_on_error(checker('events[0]', events[0]))
def test_change_bot_default_all_public_streams(self):
    # type: () -> None
    """Changing default_all_public_streams emits the matching realm_bot update."""
    events = self.do_test(lambda: do_change_default_all_public_streams(self.bot, True))
    checker = self.realm_bot_schema('default_all_public_streams', check_bool)
    self.assert_on_error(checker('events[0]', events[0]))
def test_change_bot_default_sending_stream(self):
    # type: () -> None
    """Changing the default sending stream emits the matching realm_bot update."""
    stream = get_stream("Rome", self.bot.realm)
    events = self.do_test(lambda: do_change_default_sending_stream(self.bot, stream))
    checker = self.realm_bot_schema('default_sending_stream', check_string)
    self.assert_on_error(checker('events[0]', events[0]))
def test_change_bot_default_events_register_stream(self):
    # type: () -> None
    """Changing the default events-register stream emits the matching realm_bot update."""
    stream = get_stream("Rome", self.bot.realm)
    events = self.do_test(
        lambda: do_change_default_events_register_stream(self.bot, stream))
    checker = self.realm_bot_schema('default_events_register_stream', check_string)
    self.assert_on_error(checker('events[0]', events[0]))
def test_do_deactivate_user(self):
    # type: () -> None
    """Deactivating a bot user emits a realm_bot/remove event."""
    bot_deactivate_checker = check_dict([
        ('type', equals('realm_bot')),
        ('op', equals('remove')),
        ('bot', check_dict([
            ('email', check_string),
            ('full_name', check_string),
        ])),
    ])
    bot = self.create_bot('foo-bot@zulip.com')
    events = self.do_test(lambda: do_deactivate_user(bot))
    self.assert_on_error(bot_deactivate_checker('events[1]', events[1]))
def test_rename_stream(self):
    # type: () -> None
    """Renaming a stream emits email_address and name update events."""
    realm = get_realm_by_string_id('zulip')
    stream = self.make_stream('old_name')
    new_name = u'stream with a brand new name'
    self.subscribe_to_stream(self.user_profile.email, stream.name)
    events = self.do_test(lambda: do_rename_stream(realm, stream.name, new_name))
    # Event 0: the stream's email address changes along with its name.
    # Event 1: the rename itself.
    expected = [
        ('email_address', check_string),
        ('name', equals(new_name)),
    ]
    for idx, (prop, value_checker) in enumerate(expected):
        checker = check_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals(prop)),
            ('value', value_checker),
            ('name', equals('old_name')),
        ])
        self.assert_on_error(checker('events[%d]' % (idx,), events[idx]))
def test_deactivate_stream_neversubscribed(self):
    # type: () -> None
    """Deactivating a never-subscribed stream emits a stream/delete event."""
    stream = self.make_stream('old_name')
    events = self.do_test(lambda: do_deactivate_stream(stream))
    schema_checker = check_dict([
        ('type', equals('stream')),
        ('op', equals('delete')),
        ('streams', check_list(check_dict([]))),
    ])
    self.assert_on_error(schema_checker('events[0]', events[0]))
def test_subscribe_other_user_never_subscribed(self):
    # type: () -> None
    """Subscribing another user emits a subscription/peer_add event to us."""
    events = self.do_test(
        lambda: self.subscribe_to_stream("othello@zulip.com", u"test_stream"))
    checker = check_dict([
        ('type', equals('subscription')),
        ('op', equals('peer_add')),
        ('user_id', check_int),
        ('subscriptions', check_list(check_string)),
    ])
    self.assert_on_error(checker('events[2]', events[2]))
def test_subscribe_events(self):
    # type: () -> None
    """Exercise the subscription event lifecycle end to end: add,
    peer_add, peer_remove, remove, re-add, and a stream description
    update, validating each event's payload against a schema.
    """
    # Schema for the complete subscription objects delivered on 'add'.
    subscription_schema_checker = check_list(
        check_dict([
            ('color', check_string),
            ('description', check_string),
            ('email_address', check_string),
            ('invite_only', check_bool),
            ('in_home_view', check_bool),
            ('name', check_string),
            ('desktop_notifications', check_bool),
            ('audible_notifications', check_bool),
            ('stream_id', check_int),
            ('subscribers', check_list(check_int)),
        ])
    )
    add_schema_checker = check_dict([
        ('type', equals('subscription')),
        ('op', equals('add')),
        ('subscriptions', subscription_schema_checker),
    ])
    remove_schema_checker = check_dict([
        ('type', equals('subscription')),
        ('op', equals('remove')),
        ('subscriptions', check_list(
            check_dict([
                ('name', equals('test_stream')),
                ('stream_id', check_int),
            ]),
        )),
    ])
    peer_add_schema_checker = check_dict([
        ('type', equals('subscription')),
        ('op', equals('peer_add')),
        ('user_id', check_int),
        ('subscriptions', check_list(check_string)),
    ])
    peer_remove_schema_checker = check_dict([
        ('type', equals('subscription')),
        ('op', equals('peer_remove')),
        ('user_id', check_int),
        ('subscriptions', check_list(check_string)),
    ])
    stream_update_schema_checker = check_dict([
        ('type', equals('stream')),
        ('op', equals('update')),
        ('property', equals('description')),
        ('value', check_string),
        ('name', check_string),
    ])
    # Subscribing ourselves yields a full 'add' event.
    action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")  # type: Callable
    events = self.do_test(action, event_types=["subscription", "realm_user"])
    error = add_schema_checker('events[0]', events[0])
    self.assert_on_error(error)
    # Another user subscribing yields a 'peer_add' event for us.
    action = lambda: self.subscribe_to_stream("othello@zulip.com", "test_stream")
    events = self.do_test(action)
    error = peer_add_schema_checker('events[0]', events[0])
    self.assert_on_error(error)
    stream = get_stream("test_stream", self.user_profile.realm)
    # Removing the other user yields 'peer_remove'.
    action = lambda: bulk_remove_subscriptions(
        [get_user_profile_by_email("othello@zulip.com")],
        [stream])
    events = self.do_test(action)
    error = peer_remove_schema_checker('events[0]', events[0])
    self.assert_on_error(error)
    # Removing ourselves yields a 'remove' event (checked at index 1).
    action = lambda: bulk_remove_subscriptions(
        [get_user_profile_by_email("hamlet@zulip.com")],
        [stream])
    events = self.do_test(action)
    error = remove_schema_checker('events[1]', events[1])
    self.assert_on_error(error)
    # Re-subscribing yields another 'add' event (checked at index 1).
    action = lambda: self.subscribe_to_stream("hamlet@zulip.com", "test_stream")
    events = self.do_test(action)
    error = add_schema_checker('events[1]', events[1])
    self.assert_on_error(error)
    # Changing the stream description yields a stream/update event.
    action = lambda: do_change_stream_description(get_realm_by_string_id('zulip'), 'test_stream', u'new description')
    events = self.do_test(action)
    error = stream_update_schema_checker('events[0]', events[0])
    self.assert_on_error(error)
class FetchInitialStateDataTest(ZulipTestCase):
    """Access control for the realm_bots field of the initial state."""

    def test_realm_bots_non_admin(self):
        # type: () -> None
        """Non-admin users see no bots, and no bot API keys leak."""
        user_profile = get_user_profile_by_email('cordelia@zulip.com')
        self.assertFalse(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "")
        self.assert_length(result['realm_bots'], 0)
        # The API key of an unrelated bot must not appear anywhere in the payload.
        api_key = get_user_profile_by_email('notification-bot@zulip.com').api_key
        self.assertNotIn(api_key, str(result))

    def test_realm_bots_admin(self):
        # type: () -> None
        """Admins receive the full bot roster in realm_bots."""
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        do_change_is_admin(user_profile, True)
        self.assertTrue(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "")
        self.assertTrue(len(result['realm_bots']) > 5)
from zerver.lib.event_queue import EventQueue
class EventQueueTest(TestCase):
    """Unit tests for EventQueue id assignment and event collapsing."""

    def test_one_event(self):
        # type: () -> None
        # A single pushed event is assigned id 0 and returned unchanged.
        queue = EventQueue("1")
        queue.push({"type": "pointer",
                    "pointer": 1,
                    "timestamp": "1"})
        self.assertFalse(queue.empty())
        self.assertEqual(queue.contents(),
                         [{'id': 0,
                           'type': 'pointer',
                           "pointer": 1,
                           "timestamp": "1"}])

    def test_event_collapsing(self):
        # type: () -> None
        # A run of pointer events collapses to the newest one, keeping the
        # id of the last push (9 pushes with ids 0..8 -> id 8).
        queue = EventQueue("1")
        for pointer_val in range(1, 10):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        self.assertEqual(queue.contents(),
                         [{'id': 8,
                           'type': 'pointer',
                           "pointer": 9,
                           "timestamp": "9"}])
        # Unknown event types are kept as-is; a newer restart supersedes an
        # older one, and pointer runs still collapse around them.
        queue = EventQueue("2")
        for pointer_val in range(1, 10):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        queue.push({"type": "unknown"})
        queue.push({"type": "restart", "server_generation": "1"})
        for pointer_val in range(11, 20):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        queue.push({"type": "restart", "server_generation": "2"})
        self.assertEqual(queue.contents(),
                         [{"type": "unknown",
                           "id": 9,},
                          {'id': 19,
                           'type': 'pointer',
                           "pointer": 19,
                           "timestamp": "19"},
                          {"id": 20,
                           "type": "restart",
                           "server_generation": "2"}])
        # Pointer events pushed after the restart collapse among themselves
        # and are appended; they do not merge backwards across the restart.
        for pointer_val in range(21, 23):
            queue.push({"type": "pointer",
                        "pointer": pointer_val,
                        "timestamp": str(pointer_val)})
        self.assertEqual(queue.contents(),
                         [{"type": "unknown",
                           "id": 9,},
                          {'id': 19,
                           'type': 'pointer',
                           "pointer": 19,
                           "timestamp": "19"},
                          {"id": 20,
                           "type": "restart",
                           "server_generation": "2"},
                          {'id': 22,
                           'type': 'pointer',
                           "pointer": 22,
                           "timestamp": "22"},
                          ])

    def test_flag_add_collapsing(self):
        # type: () -> None
        # Consecutive "add" flag updates for the same flag merge their
        # message id lists into a single event.
        queue = EventQueue("1")
        queue.push({"type": "update_message_flags",
                    "flag": "read",
                    "operation": "add",
                    "all": False,
                    "messages": [1, 2, 3, 4],
                    "timestamp": "1"})
        queue.push({"type": "update_message_flags",
                    "flag": "read",
                    "all": False,
                    "operation": "add",
                    "messages": [5, 6],
                    "timestamp": "1"})
        self.assertEqual(queue.contents(),
                         [{'id': 1,
                           'type': 'update_message_flags',
                           "all": False,
                           "flag": "read",
                           "operation": "add",
                           "messages": [1, 2, 3, 4, 5, 6],
                           "timestamp": "1"}])

    def test_flag_remove_collapsing(self):
        # type: () -> None
        # "remove" flag updates merge the same way as "add" ones.
        queue = EventQueue("1")
        queue.push({"type": "update_message_flags",
                    "flag": "collapsed",
                    "operation": "remove",
                    "all": False,
                    "messages": [1, 2, 3, 4],
                    "timestamp": "1"})
        queue.push({"type": "update_message_flags",
                    "flag": "collapsed",
                    "all": False,
                    "operation": "remove",
                    "messages": [5, 6],
                    "timestamp": "1"})
        self.assertEqual(queue.contents(),
                         [{'id': 1,
                           'type': 'update_message_flags',
                           "all": False,
                           "flag": "collapsed",
                           "operation": "remove",
                           "messages": [1, 2, 3, 4, 5, 6],
                           "timestamp": "1"}])

    def test_collapse_event(self):
        # type: () -> None
        # Events of different types do not collapse into each other.
        queue = EventQueue("1")
        queue.push({"type": "pointer",
                    "pointer": 1,
                    "timestamp": "1"})
        queue.push({"type": "unknown",
                    "timestamp": "1"})
        self.assertEqual(queue.contents(),
                         [{'id': 0,
                           'type': 'pointer',
                           "pointer": 1,
                           "timestamp": "1"},
                          {'id': 1,
                           'type': 'unknown',
                           "timestamp": "1"}])
class TestEventsRegisterAllPublicStreamsDefaults(TestCase):
    """How _default_all_public_streams resolves the passed value against
    the user's stored default."""

    def setUp(self):
        # type: () -> None
        self.email = 'hamlet@zulip.com'
        self.user_profile = get_user_profile_by_email(self.email)

    def _resolve(self, stored_default, passed_value):
        """Persist the user's default flag, then resolve against *passed_value*."""
        self.user_profile.default_all_public_streams = stored_default
        self.user_profile.save()
        return _default_all_public_streams(self.user_profile, passed_value)

    def test_use_passed_all_public_true_default_false(self):
        # type: () -> None
        self.assertTrue(self._resolve(False, True))

    def test_use_passed_all_public_true_default(self):
        # type: () -> None
        self.assertTrue(self._resolve(True, True))

    def test_use_passed_all_public_false_default_false(self):
        # type: () -> None
        self.assertFalse(self._resolve(False, False))

    def test_use_passed_all_public_false_default_true(self):
        # type: () -> None
        self.assertFalse(self._resolve(True, False))

    def test_use_true_default_for_none(self):
        # type: () -> None
        # A passed value of None falls back to the stored default.
        self.assertTrue(self._resolve(True, None))

    def test_use_false_default_for_none(self):
        # type: () -> None
        self.assertFalse(self._resolve(False, None))
class TestEventsRegisterNarrowDefaults(TestCase):
    """How _default_narrow resolves a passed narrow against the user's
    default events-register stream."""

    def setUp(self):
        # type: () -> None
        self.email = 'hamlet@zulip.com'
        self.user_profile = get_user_profile_by_email(self.email)
        self.stream = get_stream('Verona', self.user_profile.realm)

    def _resolve(self, default_stream_id, narrow):
        """Persist the user's default stream id, then resolve *narrow*."""
        self.user_profile.default_events_register_stream_id = default_stream_id
        self.user_profile.save()
        return _default_narrow(self.user_profile, narrow)

    def test_use_passed_narrow_no_default(self):
        # type: () -> None
        result = self._resolve(None, [[u'stream', u'my_stream']])
        self.assertEqual(result, [[u'stream', u'my_stream']])

    def test_use_passed_narrow_with_default(self):
        # type: () -> None
        # An explicit narrow wins even when a default stream is configured.
        result = self._resolve(self.stream.id, [[u'stream', u'my_stream']])
        self.assertEqual(result, [[u'stream', u'my_stream']])

    def test_use_default_if_narrow_is_empty(self):
        # type: () -> None
        result = self._resolve(self.stream.id, [])
        self.assertEqual(result, [[u'stream', u'Verona']])

    def test_use_narrow_if_default_is_none(self):
        # type: () -> None
        result = self._resolve(None, [])
        self.assertEqual(result, [])
|
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from chaco.api import LinePlot, LinearMapper, DataView, ArrayDataSource
from chaco.tools.api import ZoomTool, PanTool
from enable.api import Pointer
from traits.api import Tuple, Bool, on_trait_change
# =============standard library imports ========================
from numpy import hstack
# =============local library imports ==========================
class BaseDataCanvas(DataView):
    """Chaco canvas base class.

    Wires the view ranges to the mappers, optionally installs zoom/pan
    tools, and provides small plotting and line-drawing helpers for
    subclasses.
    """

    unified_draw = True

    # Data extents (low, high) in data space.
    x_range = Tuple
    y_range = Tuple
    # Initially visible window in data space.
    view_x_range = Tuple
    view_y_range = Tuple

    select_pointer = Pointer("hand")
    normal_pointer = Pointer("arrow")
    cross_pointer = Pointer("cross")

    show_axes = Bool(True)
    show_grids = Bool(True)
    use_zoom = Bool(True)
    use_pan = Bool(True)

    # Lazily created LinePlot reused by line_plot() when new_plot=False.
    plot = None

    def __init__(self, *args, **kw):
        """Apply default ranges, install tools, and sync view limits."""
        super(BaseDataCanvas, self).__init__(*args, **kw)
        if "x_range" not in kw:
            self.x_range = (-25, 25)
        if "y_range" not in kw:
            self.y_range = (-25, 25)
        if "view_x_range" not in kw:
            self.view_x_range = (-25, 25)
        if "view_y_range" not in kw:
            self.view_y_range = (-25, 25)

        # Invisible seed plot so the data ranges have sources to track.
        sp = LinePlot(
            index=ArrayDataSource(self.y_range),
            value=ArrayDataSource(self.x_range),
            index_mapper=LinearMapper(range=self.index_range),
            value_mapper=LinearMapper(range=self.value_range),
        )
        self.index_range.sources.append(sp.index)
        self.value_range.sources.append(sp.value)
        sp.visible = False
        self.add(sp)

        if self.use_zoom:
            self.add_zoom()
        if self.use_pan:
            self.add_pan()

        self.index_mapper.on_trait_change(self.update, "updated")
        self.value_mapper.on_trait_change(self.update, "updated")

        # Set the initial view window.
        self.set_mapper_limits("x", self.view_x_range)
        self.set_mapper_limits("y", self.view_y_range)

        self.value_axis.visible = self.show_axes
        self.index_axis.visible = self.show_axes
        self.x_grid.visible = self.show_grids
        self.y_grid.visible = self.show_grids

    def cmap_plot(self, z):
        """Add an image plot of *z* with a "hot" colormap; return its plot data."""
        from chaco.array_plot_data import ArrayPlotData
        from chaco.plot import Plot
        from chaco.default_colormaps import color_map_name_dict

        pd = ArrayPlotData()
        pd.set_data("cmapdata", z)

        p = Plot(pd, padding=0)
        p.img_plot(
            "cmapdata",
            xbounds=(-25, 25),
            ybounds=(-25, 25),
            colormap=color_map_name_dict["hot"],
        )
        self.add(p)
        return pd

    def line_plot(self, x, y, new_plot=True):
        """Plot (x, y); append to the cached plot unless *new_plot*."""
        if self.plot is None or new_plot:
            # Wrap scalars so ArrayDataSource always receives a sequence.
            if isinstance(x, (float, int)):
                x = [x]
            if isinstance(y, (float, int)):
                y = [y]
            self.plot = LinePlot(
                index=ArrayDataSource(x),
                value=ArrayDataSource(y),
                index_mapper=LinearMapper(range=self.index_range),
                value_mapper=LinearMapper(range=self.value_range),
            )
            self.add(self.plot)
        else:
            datax = self.plot.index.get_data()
            datay = self.plot.value.get_data()
            nx = hstack((datax, [x]))
            ny = hstack((datay, [y]))
            self.plot.index.set_data(nx)
            self.plot.value.set_data(ny)

    def reset_plots(self):
        """Drop the cached plot, remove components, and redraw."""
        self.plot = None
        # NOTE(review): [:1] only removes the first component; [:] (iterate
        # a copy while removing all) was probably intended — confirm before
        # changing, in case keeping later components is deliberate.
        for c in self.components[:1]:
            self.remove(c)
        self.request_redraw()

    @on_trait_change("view_x_range")
    def _update_xrange(self):
        self.set_mapper_limits("x", self.view_x_range)

    @on_trait_change("view_y_range")
    def _update_yrange(self):
        self.set_mapper_limits("y", self.view_y_range)

    @on_trait_change("show_grids")
    def change_grid_visibility(self):
        try:
            self.x_grid.visible = self.show_grids
            self.y_grid.visible = self.show_grids
            self.request_redraw()
        except AttributeError:
            # Grids may not exist yet during construction.
            pass

    def set_mapper_limits(self, mapper, limits, pad=0):
        """Set (low, high) *limits* on the "x" or "y" mapper, widened by *pad*."""
        mapper = getattr(self, "{}_mapper".format(mapper))
        if mapper is not None:
            mapper.range.low_setting = limits[0] - pad
            mapper.range.high_setting = limits[1] + pad
            self.request_redraw()

    def get_mapper_limits(self, mapper):
        """Return (low, high) of the "x" or "y" mapper range."""
        mapper = getattr(self, "{}_mapper".format(mapper))
        return mapper.range.low, mapper.range.high

    def update(self, *args, **kw):
        """Hook fired when either mapper's "updated" event fires; no-op here."""
        pass

    def add_pan(self):
        """Attach a PanTool to the canvas."""
        p = PanTool(self)
        self.tools.append(p)

    def add_zoom(self):
        """Attach a box-mode ZoomTool overlay to the canvas."""
        z = ZoomTool(
            component=self,
            always_on=False,
            tool_mode="box",
            max_zoom_out_factor=1,
            max_zoom_in_factor=10000,
        )
        self.overlays.append(z)

    def get_wh(self, *args):
        return self._get_wh(*args)

    def _get_wh(self, w, h):
        """Map a (w, h) data-space extent to a screen-space width/height."""
        wh, oo = self.map_screen([(w, h), (0, 0)])
        w = wh[0] - oo[0]
        h = wh[1] - oo[1]
        return w, h

    def _vertical_line(self, gc, x, y1, y2, color=(0, 0, 0)):
        """Stroke a vertical segment at *x* from *y1* to *y2*."""
        # BUG FIX: previously called self.line_segment, which is not defined
        # on this class (only _line_segment is) and raised AttributeError.
        p1 = (x, y1)
        p2 = (x, y2)
        self._line_segment(gc, p1, p2, color)

    def _horizontal_line(self, gc, y, x1, x2, color=(0, 0, 0)):
        """Stroke a horizontal segment at *y* from *x1* to *x2*."""
        # BUG FIX: see _vertical_line.
        p1 = (x1, y)
        p2 = (x2, y)
        self._line_segment(gc, p1, p2, color)

    def _line_segment(self, gc, p1, p2, color=None):
        """Stroke a segment from *p1* to *p2*, optionally setting the color first."""
        if color is not None:
            gc.set_stroke_color(color)
        gc.move_to(*p1)
        gc.line_to(*p2)
        gc.draw_path()
# def _draw_underlay(self, gc, *args, **kw):
# """
# """
# pass
#
# def _draw_underlay(self, *args, **kw):
# super(BaseDataCanvas, self)._draw_underlay(*args, **kw)
# self._draw_hook(*args, **kw)
# def draw(self, *args, **kw):
# """
# """
#
# super(BaseDataCanvas, self).draw(*args, **kw)
# self._draw_hook(*args, **kw)
# ====================EOF==================
|
|
"""Let's Encrypt client API."""
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import OpenSSL
import zope.component
from acme import client as acme_client
from acme import jose
from acme import messages
from letsencrypt import account
from letsencrypt import auth_handler
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import continuity_auth
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import reverter
from letsencrypt import revoker
from letsencrypt import storage
from letsencrypt.display import ops as display_ops
from letsencrypt.display import enhancements
logger = logging.getLogger(__name__)
def _acme_from_config_key(config, key):
    """Build an `acme.client.Client` from configuration and an account key."""
    # TODO: Allow for other alg types besides RS256
    verify = not config.no_verify_ssl
    return acme_client.Client(
        new_reg_uri=config.server, key=key, verify_ssl=verify)
def register(config, account_storage, tos_cb=None):
    """Register new account with an ACME CA.

    This function takes care of generating fresh private key,
    registering the account, optionally accepting CA Terms of Service
    and finally saving the account. It should be called prior to
    initialization of `Client`, unless account has already been created.

    :param .IConfig config: Client configuration.

    :param .AccountStorage account_storage: Account storage where newly
        registered account will be saved to. Save happens only after TOS
        acceptance step, so any account private keys or
        `.RegistrationResource` will not be persisted if `tos_cb`
        returns ``False``.

    :param tos_cb: If ACME CA requires the user to accept a Terms of
        Service before registering account, client action is
        necessary. For example, a CLI tool would prompt the user
        acceptance. `tos_cb` must be a callable that should accept
        `.RegistrationResource` and return a `bool`: ``True`` iff the
        Terms of Service present in the contained
        `.Registration.terms_of_service` is accepted by the client, and
        ``False`` otherwise. ``tos_cb`` will be called only if the
        client action is necessary, i.e. when ``terms_of_service is not
        None``. This argument is optional, if not supplied it will
        default to automatic acceptance!

    :raises letsencrypt.errors.Error: In case of any client problems, in
        particular registration failure, or unaccepted Terms of Service.
    :raises acme.errors.Error: In case of any protocol problems.

    :returns: Newly registered and saved account, as well as protocol
        API handle (should be used in `Client` initialization).
    :rtype: `tuple` of `.Account` and `acme.client.Client`

    """
    # Log non-standard actions, potentially wrong API calls
    if account_storage.find_all():
        logger.info("There are already existing accounts for %s", config.server)
    if config.email is None:
        # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
        logger.warning("Registering without email!")

    # Each new registration shall use a fresh new key
    key = jose.JWKRSA(key=jose.ComparableRSAKey(
        rsa.generate_private_key(
            public_exponent=65537,
            key_size=config.rsa_key_size,
            backend=default_backend())))
    acme = _acme_from_config_key(config, key)
    # TODO: add phone?
    regr = acme.register(messages.NewRegistration.from_data(email=config.email))

    if regr.terms_of_service is not None:
        # Abort (without saving anything) if the user rejects the ToS.
        if tos_cb is not None and not tos_cb(regr):
            raise errors.Error(
                "Registration cannot proceed without accepting "
                "Terms of Service.")
        regr = acme.agree_to_tos(regr)

    acc = account.Account(regr, key)
    account.report_new_account(acc, config)
    account_storage.save(acc)
    return acc, acme
class Client(object):
"""ACME protocol client.
:ivar .IConfig config: Client configuration.
:ivar .Account account: Account registered with `register`.
:ivar .AuthHandler auth_handler: Authorizations handler that will
dispatch DV and Continuity challenges to appropriate
authenticators (providing `.IAuthenticator` interface).
:ivar .IInstaller installer: Installer.
:ivar acme.client.Client acme: Optional ACME client API handle.
You might already have one from `register`.
"""
def __init__(self, config, account_, dv_auth, installer, acme=None):
    """Initialize a client.

    :param .IAuthenticator dv_auth: Prepared (`.IAuthenticator.prepare`)
        authenticator that can solve the `.constants.DV_CHALLENGES`.

    """
    self.config = config
    self.account = account_
    self.installer = installer

    # Derive the ACME handle from the account key when one was not supplied.
    if acme is None and self.account is not None:
        acme = _acme_from_config_key(config, self.account.key)
    self.acme = acme

    # TODO: Check if self.config.enroll_autorenew is None. If
    # so, set it based to the default: figure out if dv_auth is
    # standalone (then default is False, otherwise default is True)
    if dv_auth is None:
        self.auth_handler = None
    else:
        cont_auth = continuity_auth.ContinuityAuthenticator(config, installer)
        self.auth_handler = auth_handler.AuthHandler(
            dv_auth, cont_auth, self.acme, self.account)
def _obtain_certificate(self, domains, csr):
    """Obtain certificate.

    Internal function with precondition that `domains` are
    consistent with identifiers present in the `csr`.

    :param list domains: Domain names.
    :param .le_util.CSR csr: DER-encoded Certificate Signing
        Request. The key used to generate this CSR can be different
        than `authkey`.

    :returns: `.CertificateResource` and certificate chain (as
        returned by `.fetch_chain`).
    :rtype: tuple

    """
    # Guard clauses: both an authenticator and a registration are required.
    if self.auth_handler is None:
        msg = ("Unable to obtain certificate because authenticator is "
               "not set.")
        logger.warning(msg)
        raise errors.Error(msg)
    if self.account.regr is None:
        raise errors.Error("Please register with the ACME server first.")

    logger.debug("CSR: %s, domains: %s", csr, domains)

    authzr = self.auth_handler.get_authorizations(domains)

    req = OpenSSL.crypto.load_certificate_request(
        OpenSSL.crypto.FILETYPE_ASN1, csr.data)
    certr = self.acme.request_issuance(jose.ComparableX509(req), authzr)
    chain = self.acme.fetch_chain(certr)
    return certr, chain
def obtain_certificate_from_csr(self, csr):
    """Obtain certificate for an externally supplied CSR.

    :param .le_util.CSR csr: DER-encoded Certificate Signing
        Request.

    :returns: `.CertificateResource` and certificate chain (as
        returned by `.fetch_chain`).
    :rtype: tuple

    """
    # TODO: add CN to domains?
    domains = crypto_util.get_sans_from_csr(
        csr.data, OpenSSL.crypto.FILETYPE_ASN1)
    return self._obtain_certificate(domains, csr)
def obtain_certificate(self, domains):
    """Obtains a certificate from the ACME server.

    `.register` must be called before `.obtain_certificate`

    :param set domains: domains to get a certificate

    :returns: `.CertificateResource`, certificate chain (as
        returned by `.fetch_chain`), and newly generated private key
        (`.le_util.Key`) and DER-encoded Certificate Signing Request
        (`.le_util.CSR`).
    :rtype: tuple

    """
    # Generate and persist a fresh key and CSR for the requested names.
    key = crypto_util.init_save_key(
        self.config.rsa_key_size, self.config.key_dir)
    csr = crypto_util.init_save_csr(key, domains, self.config.cert_dir)

    certr, chain = self._obtain_certificate(domains, csr)
    return certr, chain, key, csr
def obtain_and_enroll_certificate(
        self, domains, authenticator, installer, plugins):
    """Obtain and enroll certificate.

    Get a new certificate for the specified domains using the specified
    authenticator and installer, and then create a new renewable lineage
    containing it.

    :param list domains: Domains to request.
    :param authenticator: The authenticator to use.
    :type authenticator: :class:`letsencrypt.interfaces.IAuthenticator`

    :param installer: The installer to use.
    :type installer: :class:`letsencrypt.interfaces.IInstaller`

    :param plugins: A PluginsFactory object.

    :returns: A new :class:`letsencrypt.storage.RenewableCert` instance
        referred to the enrolled cert lineage, or False if the cert could
        not be obtained.

    """
    certr, chain, key, _ = self.obtain_certificate(domains)

    # TODO: remove this dirty hack
    # Record the plugin names on the namespace so they are captured in
    # the renewal parameters written below.
    self.config.namespace.authenticator = plugins.find_init(
        authenticator).name
    if installer is not None:
        self.config.namespace.installer = plugins.find_init(installer).name

    # XXX: We clearly need a more general and correct way of getting
    # options into the configobj for the RenewableCert instance.
    # This is a quick-and-dirty way to do it to allow integration
    # testing to start. (Note that the config parameter to new_lineage
    # ideally should be a ConfigObj, but in this case a dict will be
    # accepted in practice.)
    params = vars(self.config.namespace)
    config = {}
    cli_config = configuration.RenewerConfiguration(self.config.namespace)

    if (cli_config.config_dir != constants.CLI_DEFAULTS["config_dir"] or
            cli_config.work_dir != constants.CLI_DEFAULTS["work_dir"]):
        logger.warning(
            "Non-standard path(s), might not work with crontab installed "
            "by your operating system package manager")

    # XXX: just to stop RenewableCert from complaining; this is
    # probably not a good solution
    chain_pem = "" if chain is None else OpenSSL.crypto.dump_certificate(
        OpenSSL.crypto.FILETYPE_PEM, chain)
    lineage = storage.RenewableCert.new_lineage(
        domains[0], OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM, certr.body),
        key.pem, chain_pem, params, config, cli_config)
    self._report_renewal_status(lineage)
    return lineage
def _report_renewal_status(self, cert):
    # pylint: disable=no-self-use
    """Informs the user about automatic renewal and deployment.

    :param .RenewableCert cert: Newly issued certificate

    """
    def _enabled(setting):
        # A setting that is absent from the config defaults to enabled.
        return (setting not in cert.configuration
                or cert.configuration.as_bool(setting))

    autorenew = _enabled("autorenew")
    autodeploy = _enabled("autodeploy")

    # Map the (renew, deploy) flag pair to the matching message prefix.
    prefixes = {
        (True, True): "Automatic renewal and deployment has ",
        (True, False): "Automatic renewal but not automatic deployment has ",
        (False, True): "Automatic deployment but not automatic renewal has ",
        (False, False): "Automatic renewal and deployment has not ",
    }
    msg = prefixes[(autorenew, autodeploy)]
    msg += ("been enabled for your certificate. These settings can be "
            "configured in the directories under {0}.").format(
                cert.cli_config.renewal_configs_dir)

    reporter = zope.component.getUtility(interfaces.IReporter)
    reporter.add_message(msg, reporter.LOW_PRIORITY, True)
def save_certificate(self, certr, chain_cert, cert_path, chain_path):
    # pylint: disable=no-self-use
    """Saves the certificate received from the ACME server.

    :param certr: ACME "certificate" resource.
    :type certr: :class:`acme.messages.Certificate`

    :param chain_cert:
    :param str cert_path: Candidate path to a certificate.
    :param str chain_path: Candidate path to a certificate chain.

    :returns: cert_path, chain_path (absolute paths to the actual files)
    :rtype: `tuple` of `str`

    :raises IOError: If unable to find room to write the cert files

    """
    for candidate in (cert_path, chain_path):
        le_util.make_or_verify_dir(
            os.path.dirname(candidate), 0o755, os.geteuid())

    def _write_pem(candidate, cert_obj):
        # Open a fresh uniquely-named file, then serialize and write the
        # certificate to it; returns the path that was actually used.
        # TODO: Except
        handle, actual_path = le_util.unique_file(candidate, 0o644)
        pem_data = OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM, cert_obj)
        # try finally close
        try:
            handle.write(pem_data)
        finally:
            handle.close()
        return actual_path

    act_cert_path = _write_pem(cert_path, certr.body)
    logger.info("Server issued certificate; certificate written to %s",
                act_cert_path)

    cert_chain_abspath = None
    if chain_cert is not None:
        act_chain_path = _write_pem(chain_path, chain_cert)
        logger.info("Cert chain written to %s", act_chain_path)
        # This expects a valid chain file
        cert_chain_abspath = os.path.abspath(act_chain_path)

    return os.path.abspath(act_cert_path), cert_chain_abspath
def deploy_certificate(self, domains, privkey_path, cert_path, chain_path):
    """Install certificate

    :param list domains: list of domains to install the certificate
    :param str privkey_path: path to certificate private key
    :param str cert_path: certificate file path (optional)
    :param str chain_path: chain file path

    :raises .errors.Error: if no installer is configured on this client.

    """
    if self.installer is None:
        # BUG FIX: the two adjacent literals previously concatenated to
        # "deploythe certificate"; keep an explicit separating space.
        logger.warning("No installer specified, client is unable to deploy "
                       "the certificate")
        raise errors.Error("No installer available")

    chain_path = None if chain_path is None else os.path.abspath(chain_path)

    for dom in domains:
        # TODO: Provide a fullchain reference for installers like
        # nginx that want it
        self.installer.deploy_cert(
            dom, os.path.abspath(cert_path),
            os.path.abspath(privkey_path), chain_path)

    self.installer.save("Deployed Let's Encrypt Certificate")
    # sites may have been enabled / final cleanup
    self.installer.restart()

    display_ops.success_installation(domains)
def enhance_config(self, domains, redirect=None):
    """Enhance the configuration.

    .. todo:: This needs to handle the specific enhancements offered by the
        installer. We will also have to find a method to pass in the chosen
        values efficiently.

    :param list domains: list of domains to configure
    :param redirect: If traffic should be forwarded from HTTP to HTTPS.
    :type redirect: bool or None

    :raises .errors.Error: if no installer is specified in the
        client.

    """
    if self.installer is None:
        logger.warning("No installer is specified, there isn't any "
                       "configuration to enhance.")
        raise errors.Error("No installer available")

    # Only prompt the user when the caller expressed no preference.
    should_redirect = (enhancements.ask("redirect")
                       if redirect is None else redirect)

    if should_redirect:
        self.redirect_to_ssl(domains)
def redirect_to_ssl(self, domains):
    """Redirect all traffic from HTTP to HTTPS

    :param list domains: domains for which redirects should be enabled
        (the previous docstring documented a nonexistent ``vhost`` param)

    """
    for dom in domains:
        try:
            self.installer.enhance(dom, "redirect")
        except errors.PluginError:
            # Best-effort per domain: a failed redirect is reported but does
            # not abort the remaining domains.
            # BUG FIX: ``Logger.warn`` is a deprecated alias; use
            # ``warning`` as the rest of this module does.
            logger.warning("Unable to perform redirect for %s", dom)

    self.installer.save("Add Redirects")
    self.installer.restart()
def validate_key_csr(privkey, csr=None):
    """Validate Key and CSR files.

    Verifies that the client key and csr arguments are valid and correspond to
    one another. This does not currently check the names in the CSR due to
    the inability to read SANs from CSRs in python crypto libraries.

    If csr is left as None, only the key will be validated.

    :param privkey: Key associated with CSR
    :type privkey: :class:`letsencrypt.le_util.Key`

    :param .le_util.CSR csr: CSR

    :raises .errors.Error: when validation fails

    """
    # TODO: Handle all of these problems appropriately
    # The client can eventually do things like prompt the user
    # and allow the user to take more appropriate actions

    # Key must be readable and valid.
    if privkey.pem and not crypto_util.valid_privkey(privkey.pem):
        raise errors.Error("The provided key is not a valid key")

    if csr:
        if csr.form == "der":
            csr_obj = OpenSSL.crypto.load_certificate_request(
                OpenSSL.crypto.FILETYPE_ASN1, csr.data)
            # BUG FIX: an X509Req must be serialized with
            # dump_certificate_request; dump_certificate expects an X509
            # certificate object and fails on a request.
            csr = le_util.CSR(
                csr.file, OpenSSL.crypto.dump_certificate_request(
                    OpenSSL.crypto.FILETYPE_PEM, csr_obj), "pem")

        # If CSR is provided, it must be readable and valid.
        if csr.data and not crypto_util.valid_csr(csr.data):
            raise errors.Error("The provided CSR is not a valid CSR")

        # If both CSR and key are provided, the key must be the same key used
        # in the CSR.
        if csr.data and privkey.pem:
            if not crypto_util.csr_matches_pubkey(
                    csr.data, privkey.pem):
                raise errors.Error("The key and CSR do not match")
def rollback(default_installer, checkpoints, config, plugins):
    """Revert configuration the specified number of checkpoints.

    :param int checkpoints: Number of checkpoints to revert.

    :param config: Configuration.
    :type config: :class:`letsencrypt.interfaces.IConfig`

    """
    # Misconfigurations are only a slight problems... allow the user to rollback
    installer = display_ops.pick_installer(
        config, default_installer, plugins, question="Which installer "
        "should be used for rollback?")

    # No Errors occurred during init... proceed normally
    # If installer is None... couldn't find an installer... there shouldn't be
    # anything to rollback
    if installer is None:
        return

    installer.rollback_checkpoints(checkpoints)
    installer.restart()
def revoke(default_installer, config, plugins, no_confirm, cert, authkey):
    """Revoke certificates.

    :param config: Configuration.
    :type config: :class:`letsencrypt.interfaces.IConfig`

    """
    installer = display_ops.pick_installer(
        config, default_installer, plugins, question="Which installer "
        "should be used for certificate revocation?")

    revoc = revoker.Revoker(installer, config, no_confirm)

    # Cert is most selective, so it is chosen first.
    if cert is not None:
        revoc.revoke_from_cert(cert[0])
        return

    if authkey is not None:
        revoc.revoke_from_key(le_util.Key(authkey[0], authkey[1]))
        return

    # Nothing specific was requested; let the user choose interactively.
    revoc.revoke_from_menu()
def view_config_changes(config):
    """View checkpoints and associated configuration changes.

    .. note:: This assumes that the installation is using a Reverter object.

    :param config: Configuration.
    :type config: :class:`letsencrypt.interfaces.IConfig`

    """
    reversion = reverter.Reverter(config)
    # Recover from any prior incomplete operation before displaying history.
    reversion.recovery_routine()
    reversion.view_config_changes()
|
|
"""
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
    # Intentionally empty: contributes only an implicit auto primary key.
    pass


class ProxyCategory(DumbCategory):
    class Meta:
        proxy = True


@python_2_unicode_compatible
class NamedCategory(DumbCategory):
    name = models.CharField(max_length=10)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Tag(models.Model):
    # Self-referential tree of tags, optionally grouped by category.
    name = models.CharField(max_length=10)
    parent = models.ForeignKey(
        'self',
        models.SET_NULL,
        blank=True, null=True,
        related_name='children',
    )
    category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Note(models.Model):
    note = models.CharField(max_length=100)
    misc = models.CharField(max_length=10)
    tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True)

    class Meta:
        ordering = ['note']

    def __str__(self):
        return self.note

    def __init__(self, *args, **kwargs):
        super(Note, self).__init__(*args, **kwargs)
        # Regression for #13227 -- having an attribute that
        # is unpicklable doesn't stop you from cloning queries
        # that use objects of that type as an argument.
        self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
    name = models.CharField(max_length=10)
    tag = models.ForeignKey(Tag, models.CASCADE)
    notes = models.ManyToManyField(Note)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class ExtraInfo(models.Model):
    info = models.CharField(max_length=100)
    note = models.ForeignKey(Note, models.CASCADE, null=True)
    value = models.IntegerField(null=True)

    class Meta:
        ordering = ['info']

    def __str__(self):
        return self.info


@python_2_unicode_compatible
class Author(models.Model):
    name = models.CharField(max_length=10)
    num = models.IntegerField(unique=True)
    extra = models.ForeignKey(ExtraInfo, models.CASCADE)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Item(models.Model):
    name = models.CharField(max_length=10)
    created = models.DateTimeField()
    modified = models.DateTimeField(blank=True, null=True)
    tags = models.ManyToManyField(Tag, blank=True)
    creator = models.ForeignKey(Author, models.CASCADE)
    note = models.ForeignKey(Note, models.CASCADE)

    class Meta:
        # Ordering across a relation ('-note') exercises join-based ordering.
        ordering = ['-note', 'name']

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Report(models.Model):
    name = models.CharField(max_length=10)
    # FK targeting a non-pk unique column (Author.num) via to_field.
    creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True)

    def __str__(self):
        return self.name


class ReportComment(models.Model):
    report = models.ForeignKey(Report, models.CASCADE)


@python_2_unicode_compatible
class Ranking(models.Model):
    rank = models.IntegerField()
    author = models.ForeignKey(Author, models.CASCADE)

    class Meta:
        # A complex ordering specification. Should stress the system a bit.
        ordering = ('author__extra__note', 'author__name', 'rank')

    def __str__(self):
        return '%d: %s' % (self.rank, self.author.name)


@python_2_unicode_compatible
class Cover(models.Model):
    title = models.CharField(max_length=50)
    item = models.ForeignKey(Item, models.CASCADE)

    class Meta:
        # Ordering by an FK pulls in Item's own (relation-based) ordering.
        ordering = ['item']

    def __str__(self):
        return self.title


@python_2_unicode_compatible
class Number(models.Model):
    num = models.IntegerField()

    def __str__(self):
        return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
    valid = models.CharField(max_length=10)
    parent = models.ManyToManyField('self')

    class Meta:
        ordering = ['valid']


# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
    y = models.ForeignKey('Y', models.CASCADE)


class Y(models.Model):
    x1 = models.ForeignKey(X, models.CASCADE, related_name='y1')


# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
    y = models.ForeignKey('LoopY', models.CASCADE)

    class Meta:
        ordering = ['y']


class LoopY(models.Model):
    x = models.ForeignKey(LoopX, models.CASCADE)

    class Meta:
        ordering = ['x']


class LoopZ(models.Model):
    # Single-model ordering cycle (orders by a self-referential FK).
    z = models.ForeignKey('self', models.CASCADE)

    class Meta:
        ordering = ['z']


# A model and custom default manager combination.
class CustomManager(models.Manager):
    def get_queryset(self):
        # Default manager silently filters to public objects tagged 't1'.
        qs = super(CustomManager, self).get_queryset()
        return qs.filter(public=True, tag__name='t1')


@python_2_unicode_compatible
class ManagedModel(models.Model):
    data = models.CharField(max_length=10)
    tag = models.ForeignKey(Tag, models.CASCADE)
    public = models.BooleanField(default=True)

    # First manager declared (objects) is the default and is filtering;
    # normal_manager offers unfiltered access.
    objects = CustomManager()
    normal_manager = models.Manager()

    def __str__(self):
        return self.data


# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
    data = models.CharField(max_length=10)


class MemberManager(models.Manager):
    def get_queryset(self):
        # Always join the one-to-one Detail row.
        return super(MemberManager, self).get_queryset().select_related("details")


class Member(models.Model):
    name = models.CharField(max_length=10)
    details = models.OneToOneField(Detail, models.CASCADE, primary_key=True)

    objects = MemberManager()


class Child(models.Model):
    person = models.OneToOneField(Member, models.CASCADE, primary_key=True)
    parent = models.ForeignKey(Member, models.CASCADE, related_name="children")


# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
    name = models.CharField(max_length=10, primary_key=True)
    extra = models.CharField(max_length=10)

    class Meta:
        ordering = ['name', 'extra']


class Related(models.Model):
    custom = models.ForeignKey(CustomPk, models.CASCADE, null=True)


class CustomPkTag(models.Model):
    id = models.CharField(max_length=20, primary_key=True)
    custom_pk = models.ManyToManyField(CustomPk)
    tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
    name = models.CharField("Name", max_length=20)
    # Nullable, unique FK forward to Fan; Fan points back via fan_of.
    greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True)

    def __str__(self):
        return self.name


class TvChef(Celebrity):
    pass


class Fan(models.Model):
    fan_of = models.ForeignKey(Celebrity, models.CASCADE)


# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
    data = models.CharField(max_length=10)

    def __str__(self):
        return self.data


class LeafB(models.Model):
    data = models.CharField(max_length=10)


class Join(models.Model):
    a = models.ForeignKey(LeafA, models.CASCADE)
    b = models.ForeignKey(LeafB, models.CASCADE)


@python_2_unicode_compatible
class ReservedName(models.Model):
    # "order" is a reserved word in SQL; must be quoted in queries.
    name = models.CharField(max_length=20)
    order = models.IntegerField()

    def __str__(self):
        return self.name


# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
    data = models.CharField(max_length=10)

    def __str__(self):
        return self.data


class PointerA(models.Model):
    connection = models.ForeignKey(SharedConnection, models.CASCADE)


class PointerB(models.Model):
    connection = models.ForeignKey(SharedConnection, models.CASCADE)


# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
    name = models.CharField(max_length=10)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name


class RelatedObject(models.Model):
    single = models.ForeignKey(SingleObject, models.SET_NULL, null=True)
    f = models.IntegerField(null=True)

    class Meta:
        ordering = ['single']


@python_2_unicode_compatible
class Plaything(models.Model):
    # Ordering chains through RelatedObject down to SingleObject.
    name = models.CharField(max_length=10)
    others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True)

    class Meta:
        ordering = ['others']

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Article(models.Model):
    name = models.CharField(max_length=20)
    created = models.DateTimeField()

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Food(models.Model):
    name = models.CharField(max_length=20, unique=True)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Eaten(models.Model):
    # FK to a non-pk unique column via to_field.
    food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True)
    meal = models.CharField(max_length=20)

    def __str__(self):
        return "%s at %s" % (self.food, self.meal)


@python_2_unicode_compatible
class Node(models.Model):
    # Self-referential FK targeting the unique non-pk column "num".
    num = models.IntegerField(unique=True)
    parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True)

    def __str__(self):
        return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name

    def __iter__(self):
        # Ticket #23721
        assert False, 'type checking should happen without calling model __iter__'


class ProxyObjectA(ObjectA):
    class Meta:
        proxy = True


class ChildObjectA(ObjectA):
    pass


@python_2_unicode_compatible
class ObjectB(models.Model):
    name = models.CharField(max_length=50)
    objecta = models.ForeignKey(ObjectA, models.CASCADE)
    num = models.PositiveSmallIntegerField()

    def __str__(self):
        return self.name


class ProxyObjectB(ObjectB):
    class Meta:
        proxy = True


@python_2_unicode_compatible
class ObjectC(models.Model):
    name = models.CharField(max_length=50)
    objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True)
    objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True)
    childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk')

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class SimpleCategory(models.Model):
    name = models.CharField(max_length=15)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
    special_name = models.CharField(max_length=15)

    def __str__(self):
        return self.name + " " + self.special_name


@python_2_unicode_compatible
class CategoryItem(models.Model):
    category = models.ForeignKey(SimpleCategory, models.CASCADE)

    def __str__(self):
        return "category item: " + str(self.category)


@python_2_unicode_compatible
class OneToOneCategory(models.Model):
    new_name = models.CharField(max_length=15)
    category = models.OneToOneField(SimpleCategory, models.CASCADE)

    def __str__(self):
        return "one2one " + self.new_name


class CategoryRelationship(models.Model):
    # Two FKs to the same model require distinct related_names.
    first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel')
    second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel')


class NullableName(models.Model):
    name = models.CharField(max_length=20, null=True)

    class Meta:
        ordering = ['id']
class ModelD(models.Model):
    name = models.TextField()


class ModelC(models.Model):
    name = models.TextField()


class ModelB(models.Model):
    name = models.TextField()
    c = models.ForeignKey(ModelC, models.CASCADE)


class ModelA(models.Model):
    name = models.TextField()
    b = models.ForeignKey(ModelB, models.SET_NULL, null=True)
    d = models.ForeignKey(ModelD, models.CASCADE)


@python_2_unicode_compatible
class Job(models.Model):
    name = models.CharField(max_length=20, unique=True)

    def __str__(self):
        return self.name


class JobResponsibilities(models.Model):
    # Through-table whose FKs target non-pk unique columns on both sides.
    job = models.ForeignKey(Job, models.CASCADE, to_field='name')
    responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description')


@python_2_unicode_compatible
class Responsibility(models.Model):
    description = models.CharField(max_length=20, unique=True)
    jobs = models.ManyToManyField(Job, through=JobResponsibilities,
                                  related_name='responsibilities')

    def __str__(self):
        return self.description


# Models for disjunction join promotion low level testing.
class FK1(models.Model):
    f1 = models.TextField()
    f2 = models.TextField()


class FK2(models.Model):
    f1 = models.TextField()
    f2 = models.TextField()


class FK3(models.Model):
    f1 = models.TextField()
    f2 = models.TextField()


class BaseA(models.Model):
    a = models.ForeignKey(FK1, models.SET_NULL, null=True)
    b = models.ForeignKey(FK2, models.SET_NULL, null=True)
    c = models.ForeignKey(FK3, models.SET_NULL, null=True)


@python_2_unicode_compatible
class Identifier(models.Model):
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name


class Program(models.Model):
    identifier = models.OneToOneField(Identifier, models.CASCADE)


class Channel(models.Model):
    programs = models.ManyToManyField(Program)
    identifier = models.OneToOneField(Identifier, models.CASCADE)


class Book(models.Model):
    title = models.TextField()
    chapter = models.ForeignKey('Chapter', models.CASCADE)


class Chapter(models.Model):
    title = models.TextField()
    paragraph = models.ForeignKey('Paragraph', models.CASCADE)


class Paragraph(models.Model):
    text = models.TextField()
    page = models.ManyToManyField('Page')


class Page(models.Model):
    text = models.TextField()


class MyObject(models.Model):
    parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children')
    data = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
    # Explicit (non-auto) integer primary key.
    id = models.IntegerField(primary_key=True)

    class Meta:
        ordering = ('pk', )

    def __str__(self):
        return '%s' % self.pk


@python_2_unicode_compatible
class OrderItem(models.Model):
    order = models.ForeignKey(Order, models.CASCADE, related_name='items')
    status = models.IntegerField()

    class Meta:
        ordering = ('pk', )

    def __str__(self):
        return '%s' % self.pk


class BaseUser(models.Model):
    pass


@python_2_unicode_compatible
class Task(models.Model):
    title = models.CharField(max_length=10)
    # Two FKs to the same model require distinct related_names.
    owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner')
    creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator')

    def __str__(self):
        return self.title


@python_2_unicode_compatible
class Staff(models.Model):
    name = models.CharField(max_length=10)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
    staff = models.OneToOneField(Staff, models.CASCADE, related_name='user')

    def __str__(self):
        # BUG FIX: __str__ must return a text string; returning the related
        # Staff instance raises a TypeError when str() is applied.  Coerce
        # via six.text_type, matching Number.__str__ in this module.
        return six.text_type(self.staff)
class Ticket21203Parent(models.Model):
    parentid = models.AutoField(primary_key=True)
    parent_bool = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now=True)


class Ticket21203Child(models.Model):
    childid = models.AutoField(primary_key=True)
    parent = models.ForeignKey(Ticket21203Parent, models.CASCADE)


class Person(models.Model):
    name = models.CharField(max_length=128)


@python_2_unicode_compatible
class Company(models.Model):
    name = models.CharField(max_length=128)
    # M2M through an explicit intermediate model (Employment).
    employees = models.ManyToManyField(Person, related_name='employers', through='Employment')

    def __str__(self):
        return self.name


class Employment(models.Model):
    employer = models.ForeignKey(Company, models.CASCADE)
    employee = models.ForeignKey(Person, models.CASCADE)
    title = models.CharField(max_length=128)


# Bug #22429
class School(models.Model):
    pass


class Student(models.Model):
    school = models.ForeignKey(School, models.CASCADE)


class Classroom(models.Model):
    school = models.ForeignKey(School, models.CASCADE)
    students = models.ManyToManyField(Student, related_name='classroom')


class Ticket23605AParent(models.Model):
    pass


class Ticket23605A(Ticket23605AParent):
    pass


class Ticket23605B(models.Model):
    modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
    modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
    field_b0 = models.IntegerField(null=True)
    field_b1 = models.BooleanField(default=False)


class Ticket23605C(models.Model):
    field_c0 = models.FloatField()


# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
    alive = models.BooleanField()

    class Meta:
        db_table = 'Individual'


class RelatedIndividual(models.Model):
    related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual')

    class Meta:
        db_table = 'RelatedIndividual'
|
|
# Canned API responses used as test fixtures.  Most string fields carry the
# placeholder value "Kenna" -- only the structure, key names, and numeric ids
# are significant to the tests that consume these constants.

# Fixture shaped like a vulnerabilities-search response (single result).
VULNERABILITIES_SEARCH_RESPONSE = {
    "vulnerabilities": [
        {
            "id": 1,
            "status": "open",
            "closed_at": None,
            "created_at": "Kenna",
            "due_date": "Kenna",
            "notes": "Kenna",
            "port": [],
            "priority": 1,
            "identifiers": [
                "Kenna",
                "Kenna"
            ],
            "last_seen_time": "Kenna",
            "scanner_score": None,
            "fix_id": 1,
            "scanner_vulnerabilities": [
                {
                    "port": None,
                    "external_unique_id": "Kenna",
                    "open": True
                },
                {
                    "port": None,
                    "external_unique_id": "Kenna",
                    "open": True
                }
            ],
            "asset_id": 1,
            "connectors": [
                {
                    "id": 1,
                    "name": "Kenna",
                    "connector_definition_name": "Kenna",
                    "vendor": "Kenna"
                },
                {
                    "id": 1,
                    "name": "Kenna",
                    "connector_definition_name": "Kenna",
                    "vendor": "Kenna"
                }
            ],
            "service_ticket": None,
            "urls": {
                "asset": "Kenna"
            },
            "solution": "Kenna",
            "patch": True,
            "patch_published_at": None,
            "cve_id": "Kenna",
            "cve_description": "Kenna",
            "cve_published_at": "Kenna",
            "description": None,
            "wasc_id": None,
            "severity": 1,
            "threat": 1,
            "popular_target": True,
            "active_internet_breach": True,
            "easily_exploitable": True,
            "malware_exploitable": True,
            "predicted_exploitable": True,
            "custom_fields": [],
            "first_found_on": "Kenna",
            "top_priority": True,
            "risk_meter_score": 1,
            "closed": True
        }
    ]
}

# Fixture listing four connectors; the only one with a host is ServiceNow.
GET_CONNECTORS_RESPONSE = {
    "connectors": [
        {
            "host": None,
            "name": "Nessus XML",
            "running": True,
            "id": 152075
        },
        {
            "host": None,
            "name": "Generic",
            "running": True,
            "id": 152076
        },
        {
            "host": None,
            "name": "Checkmarx XML",
            "running": True,
            "id": 152077
        },
        {
            "host": "ven01347.service-now.com:443",
            "name": "ServiceNow",
            "running": True,
            "id": 152078
        }
    ]
}

# Fixture shaped like a fixes-search response (single fix, id sentinel 2).
SEARCH_FIXES_RESPONSE = {
    "fixes": [
        {
            "id": 2,
            "diagnosis": "Kenna",
            "consequence": None,
            "solution": "Kenna",
            "url": None,
            "title": "Kenna",
            "vendor": None,
            "reference_links": None,
            "assets": [
                {
                    "id": 2,
                    "locator": "Kenna",
                    "display_locator": "Kenna",
                    "primary_locator": "Kenna"
                }
            ],
            "scanner_ids": [
                "Kenna",
                "Kenna"
            ],
            "cves": [
                "Kenna"
            ],
            "updated_at": "Kenna",
            "patch_publication_date": None,
            "category": None,
            "vuln_count": 2,
            "max_vuln_score": 2
        }
    ]
}

# Fixture shaped like an assets-search response (single asset, sentinel 3).
SEARCH_ASSETS_RESPONSE = {
    "assets": [
        {
            "id": 3,
            "created_at": "Kenna",
            "priority": 3,
            "operating_system": "Kenna",
            "notes": None,
            "last_booted_at": None,
            "primary_locator": "Kenna",
            "locator": "Kenna",
            "vulnerabilities_count": 3,
            "status": "active",
            "last_seen_time": "Kenna",
            "network_ports": [],
            "tags": [
                "Kenna"
            ],
            "owner": None,
            "urls": {
                "vulnerabilities": "Kenna"
            },
            "ip_address": "Kenna",
            "database": None,
            "hostname": None,
            "fqdn": None,
            "netbios": None,
            "application": None,
            "file": None,
            "mac_address": "Kenna",
            "ec2": None,
            "url": None,
            "external_id": None,
            "ipv6": None,
            "risk_meter_score": 3,
            "asset_groups": [
                {
                    "id": 3,
                    "name": "Kenna"
                },
                {
                    "id": 4,
                    "name": "Kenna"
                }
            ]
        }
    ]
}

# Fixture shaped like an asset's vulnerabilities listing (sentinel ids 4/5).
GET_ASSETS_VULNERABILITIES_RESPONSE = {
    "vulnerabilities": [
        {
            "id": 4,
            "status": "open",
            "closed_at": None,
            "created_at": "Kenna",
            "due_date": "Kenna",
            "notes": None,
            "port": [],
            "priority": None,
            "identifiers": [
                "Kenna",
                "Kenna"
            ],
            "last_seen_time": "Kenna",
            "scanner_score": None,
            "fix_id": 4,
            "scanner_vulnerabilities": [
                {
                    "port": None,
                    "external_unique_id": "Kenna",
                    "open": True
                },
                {
                    "port": None,
                    "external_unique_id": "Kenna",
                    "open": True
                }
            ],
            "asset_id": 4,
            "connectors": [
                {
                    "id": 5,
                    "name": "Kenna",
                    "connector_definition_name": "Kenna",
                    "vendor": "Kenna"
                },
                {
                    "id": 5,
                    "name": "Kenna",
                    "connector_definition_name": "Kenna",
                    "vendor": "Kenna"
                }
            ],
            "service_ticket": None,
            "urls": {
                "asset": "Kenna"
            },
            "solution": "Kenna",
            "patch": True,
            "patch_published_at": None,
            "cve_id": "Kenna",
            "cve_description": "Kenna",
            "cve_published_at": "Kenna",
            "description": None,
            "wasc_id": None,
            "severity": 4,
            "threat": 4,
            "popular_target": True,
            "active_internet_breach": True,
            "easily_exploitable": True,
            "malware_exploitable": True,
            "predicted_exploitable": True,
            "custom_fields": [],
            "first_found_on": "Kenna",
            "top_priority": True,
            "risk_meter_score": 4,
            "closed": True
        }
    ]
}

# Fixture for connector run history: two successful runs a day apart with
# identical payload/vuln counters.
GET_CONNECTOR_RUNS_RESPONSE = [
    {'id': 1462281,
     'start_time': '2020-12-21T06:32:03.000Z',
     'end_time': '2020-12-21T07:52:28.000Z',
     'success': True, 'total_payload_count': 6819,
     'processed_payload_count': 6819,
     'failed_payload_count': 0,
     'processed_assets_count': 6456,
     'assets_with_tags_reset_count': 0,
     'processed_scanner_vuln_count': 651063,
     'updated_scanner_vuln_count': 21033,
     'created_scanner_vuln_count': 0,
     'closed_scanner_vuln_count': 0,
     'autoclosed_scanner_vuln_count': 0,
     'reopened_scanner_vuln_count': 0,
     'closed_vuln_count': 0,
     'autoclosed_vuln_count': 0,
     'reopened_vuln_count': 0
     },
    {'id': 1460258,
     'start_time': '2020-12-20T06:32:05.000Z',
     'end_time': '2020-12-20T07:48:42.000Z',
     'success': True,
     'total_payload_count': 6819,
     'processed_payload_count': 6819,
     'failed_payload_count': 0,
     'processed_assets_count': 6456,
     'assets_with_tags_reset_count': 0,
     'processed_scanner_vuln_count': 651063,
     'updated_scanner_vuln_count': 21033,
     'created_scanner_vuln_count': 0,
     'closed_scanner_vuln_count': 0,
     'autoclosed_scanner_vuln_count': 0,
     'reopened_scanner_vuln_count': 0,
     'closed_vuln_count': 0,
     'autoclosed_vuln_count': 0,
     'reopened_vuln_count': 0
     }
]
|
|
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellStateManager
"""
import time
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
import six
from nova.cells import state
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
from nova.openstack.common import fileutils
from nova import test
# Each tuple mirrors _create_fake_node's signature:
# (host, total_mem [memory_mb], total_disk [local_gb],
#  free_mem [free_ram_mb], free_disk [free_disk_gb]).
FAKE_COMPUTES = [
    ('host1', 1024, 100, 0, 0),
    ('host2', 1024, 100, -1, -1),
    ('host3', 1024, 100, 1024, 100),
    ('host4', 1024, 100, 300, 30),
]

# Same shape as FAKE_COMPUTES, but with two compute nodes per host.
FAKE_COMPUTES_N_TO_ONE = [
    ('host1', 1024, 100, 0, 0),
    ('host1', 1024, 100, -1, -1),
    ('host2', 1024, 100, 1024, 100),
    ('host2', 1024, 100, 300, 30),
]

# NOTE(alaski): It's important to have multiple types that end up having the
# same memory and disk requirements. So two types need the same first value,
# and two need the second and third values to add up to the same thing.
# Each triple feeds _fake_instance_type_all's _type(mem, root, eph).
FAKE_ITYPES = [
    (0, 0, 0),
    (50, 12, 13),
    (50, 2, 4),
    (10, 20, 5),
]
def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk):
    """Build a ComputeNode object from one of the fake compute tuples."""
    node_fields = {
        'host': host,
        'memory_mb': total_mem,
        'local_gb': total_disk,
        'free_ram_mb': free_mem,
        'free_disk_gb': free_disk,
    }
    return objects.ComputeNode(**node_fields)
@classmethod
def _fake_service_get_all_by_binary(cls, context, binary):
    # Stub for ServiceList.get_by_binary (see setUp); wrapped in
    # ``classmethod`` so the descriptor protocol supplies ``cls`` when it is
    # installed on the class.  Only the host matters; all services enabled.
    services = []
    for host, _total_mem, _total_disk, _free_mem, _free_disk in FAKE_COMPUTES:
        services.append(objects.Service(host=host, disabled=False))
    return services
@classmethod
def _fake_compute_node_get_all(cls, context):
    # Stub for ComputeNodeList.get_all: one node per FAKE_COMPUTES tuple.
    nodes = []
    for spec in FAKE_COMPUTES:
        nodes.append(_create_fake_node(*spec))
    return nodes
@classmethod
def _fake_compute_node_n_to_one_get_all(cls, context):
    # Variant stub: several compute nodes share a host (N-to-one mapping).
    nodes = []
    for spec in FAKE_COMPUTES_N_TO_ONE:
        nodes.append(_create_fake_node(*spec))
    return nodes
def _fake_cell_get_all(context):
return []
def _fake_instance_type_all(context):
    # Stub for db.flavor_get_all: one flavor dict per FAKE_ITYPES triple,
    # which is ordered (memory_mb, root_gb, ephemeral_gb).
    keys = ('memory_mb', 'root_gb', 'ephemeral_gb')
    return [dict(zip(keys, fake)) for fake in FAKE_ITYPES]
class TestCellsStateManager(test.NoDBTestCase):
def setUp(self):
    super(TestCellsStateManager, self).setUp()

    # Swap the object/DB layer for the in-memory fakes defined above.
    stub_targets = (
        (objects.ComputeNodeList, 'get_all', _fake_compute_node_get_all),
        (objects.ServiceList, 'get_by_binary',
         _fake_service_get_all_by_binary),
        (db, 'flavor_get_all', _fake_instance_type_all),
        (db, 'cell_get_all', _fake_cell_get_all),
    )
    for target, attr, fake in stub_targets:
        self.stubs.Set(target, attr, fake)
def test_cells_config_not_found(self):
    # Pointing cells_config at a missing file must fail loudly, naming the
    # file that could not be found.
    missing = 'no_such_file_exists.conf'
    self.flags(cells_config=missing, group='cells')
    exc = self.assertRaises(cfg.ConfigFilesNotFoundError,
                            state.CellStateManager)
    self.assertEqual([missing], exc.config_files)
@mock.patch.object(cfg.ConfigOpts, 'find_file')
@mock.patch.object(fileutils, 'read_cached_file')
def test_filemanager_returned(self, mock_read_cached_file, mock_find_file):
    """A configured cells_config file selects the file-backed manager."""
    mock_find_file.return_value = "/etc/nova/cells.json"
    # read_cached_file returns a (reloaded, contents) pair.  NOTE(review):
    # a dict is handed to six.StringIO here -- that only works where
    # StringIO accepts a non-string buffer (py2); confirm the intent.
    mock_read_cached_file.return_value = (False, six.StringIO({}))
    self.flags(cells_config='cells.json', group='cells')
    manager = state.CellStateManager()
    self.assertIsInstance(manager,
                          state.CellStateManagerFile)
    # The file-backed manager is read-only: every mutation API must raise.
    self.assertRaises(exception.CellsUpdateUnsupported,
                      manager.cell_create, None, None)
    self.assertRaises(exception.CellsUpdateUnsupported,
                      manager.cell_update, None, None, None)
    self.assertRaises(exception.CellsUpdateUnsupported,
                      manager.cell_delete, None, None)
def test_dbmanager_returned(self):
self.assertIsInstance(state.CellStateManager(),
state.CellStateManagerDB)
def test_capacity_no_reserve(self):
# utilize entire cell
cap = self._capacity(0.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = cell_free_ram / 50
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 5 # 4 on host 3, 1 on host4
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
def test_capacity_full_reserve(self):
# reserve the entire cell. (utilize zero percent)
cap = self._capacity(100.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)])
def test_capacity_part_reserve(self):
# utilize half the cell's free capacity
cap = self._capacity(50.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = 10 # 10 from host 3
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 2 # 2 on host 3
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
def _get_state_manager(self, reserve_percent=0.0):
self.flags(reserve_percent=reserve_percent, group='cells')
return state.CellStateManager()
def _capacity(self, reserve_percent):
state_manager = self._get_state_manager(reserve_percent)
my_state = state_manager.get_my_state()
return my_state.capacities
class TestCellsStateManagerNToOne(TestCellsStateManager):
    """Same capacity checks, but with several services mapping to one host."""
    def setUp(self):
        super(TestCellsStateManagerNToOne, self).setUp()
        # Swap in the N-services-to-one-host compute fixture.
        self.stubs.Set(objects.ComputeNodeList, 'get_all',
                       _fake_compute_node_n_to_one_get_all)
    def test_capacity_part_reserve(self):
        # utilize half the cell's free capacity
        cap = self._capacity(50.0)
        cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES_N_TO_ONE)
        self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
        cell_free_disk = (1024 *
                sum(compute[4] for compute in FAKE_COMPUTES_N_TO_ONE))
        self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
        self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
        self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
        units = 6  # 6 from host 2
        self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
        sz = 25 * 1024
        units = 1  # 1 on host 2
        self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
class TestCellStateManagerException(test.NoDBTestCase):
    """Verify CellStateManagerDB retries its initial sync after a DB error."""
    @mock.patch.object(time, 'sleep')
    def test_init_db_error(self, mock_sleep):
        # A DBError during the first sync should sleep 30s and retry once.
        class TestCellStateManagerDB(state.CellStateManagerDB):
            def __init__(self):
                # First sync attempt raises, the retry succeeds.
                self._cell_data_sync = mock.Mock()
                self._cell_data_sync.side_effect = [db_exc.DBError(), []]
                super(TestCellStateManagerDB, self).__init__()
        test = TestCellStateManagerDB()
        mock_sleep.assert_called_once_with(30)
        self.assertEqual(test._cell_data_sync.call_count, 2)
class TestCellsGetCapacity(TestCellsStateManager):
    """Querying aggregated capacities for this cell and its children."""
    def setUp(self):
        super(TestCellsGetCapacity, self).setUp()
        self.capacities = {"ram_free": 1234}
        self.state_manager = self._get_state_manager()
        # Two child cells, each reporting the same capacities dict.
        cell = models.Cell(name="cell_name")
        other_cell = models.Cell(name="other_cell_name")
        cell.capacities = self.capacities
        other_cell.capacities = self.capacities
        self.stubs.Set(self.state_manager, 'child_cells',
                       {"cell_name": cell,
                        "other_cell_name": other_cell})
    def test_get_cell_capacity_for_all_cells(self):
        # No cell name: my cell plus all children are summed (3 * 1234).
        self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
                       self.capacities)
        capacities = self.state_manager.get_capacities()
        self.assertEqual({"ram_free": 3702}, capacities)
    def test_get_cell_capacity_for_the_parent_cell(self):
        # Asking for the parent cell by name also aggregates the children.
        self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
                       self.capacities)
        capacities = self.state_manager.\
            get_capacities(self.state_manager.my_cell_state.name)
        self.assertEqual({"ram_free": 3702}, capacities)
    def test_get_cell_capacity_for_a_cell(self):
        # A specific child cell returns only its own capacities.
        self.assertEqual(self.capacities,
                self.state_manager.get_capacities(cell_name="cell_name"))
    def test_get_cell_capacity_for_non_existing_cell(self):
        # Unknown cell names raise CellNotFound.
        self.assertRaises(exception.CellNotFound,
                          self.state_manager.get_capacities,
                          cell_name="invalid_cell_name")
class FakeCellStateManager(object):
    """Test double that records every call made to _cell_data_sync."""

    def __init__(self):
        # Each entry is a ('_cell_data_sync', force) tuple, in call order.
        self.called = []

    def _cell_data_sync(self, force=False):
        self.called.append(('_cell_data_sync', force))
class TestSyncDecorators(test.NoDBTestCase):
    """Check that sync_before/sync_after trigger a cell data sync."""

    def _run_wrapped(self, decorator, expected_force):
        # Shared driver: wrap a fake method, invoke it, and verify both the
        # pass-through of args/result and the recorded sync call.
        manager = FakeCellStateManager()

        def fake_method(inst, *args, **kwargs):
            self.assertEqual(inst, manager)
            self.assertEqual(args, (1, 2, 3))
            self.assertEqual(kwargs, dict(a=4, b=5, c=6))
            return 'result'

        wrapped = decorator(fake_method)
        outcome = wrapped(manager, 1, 2, 3, a=4, b=5, c=6)
        self.assertEqual(outcome, 'result')
        self.assertEqual(manager.called,
                         [('_cell_data_sync', expected_force)])

    def test_sync_before(self):
        self._run_wrapped(state.sync_before, False)

    def test_sync_after(self):
        self._run_wrapped(state.sync_after, True)
|
|
__author__ = 'saeedamen'
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
CashBacktest
Conducts backtest for strategies trading cash based assets. Reports historical return statistics
and returns time series.
"""
import pandas
import numpy
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
from pythalesians.timeseries.calcs.timeseriesdesc import TimeSeriesDesc
from pythalesians.util.loggermanager import LoggerManager
class CashBacktest:
    """Backtester for strategies trading cash based assets.

    Computes the P&L of signal-driven strategies (optionally applying
    signal-level and portfolio-level volatility targeting) and exposes the
    resulting return statistics and time series through getter methods.

    Fix vs. previous revision: ``get_pnl_desc`` referenced the non-existent
    attribute ``_tsd_signals`` (always raising AttributeError); the stats
    object is stored as ``_tsd_pnl``.
    """

    def __init__(self):
        self.logger = LoggerManager().getLogger(__name__)
        self._pnl = None            # per-signal P&L returns
        self._portfolio = None      # combined portfolio P&L returns
        return

    def calculate_trading_PnL(self, br, asset_a_df, signal_df):
        """
        calculate_trading_PnL - Calculates P&L of a trading strategy and statistics to be retrieved later

        Parameters
        ----------
        br : BacktestRequest
            Parameters for the backtest specifying start date, finish data, transaction costs etc.

        asset_a_df : pandas.DataFrame
            Asset prices to be traded

        signal_df : pandas.DataFrame
            Signals for the trading strategy
        """
        tsc = TimeSeriesCalcs()

        # make sure the dates of both traded asset and signal are aligned properly
        asset_df, signal_df = asset_a_df.align(signal_df, join='left', axis = 0)

        # only allow signals to change on the days when we can trade assets
        signal_df = signal_df.mask(numpy.isnan(asset_df.values))    # fill asset holidays with NaN signals
        signal_df = signal_df.fillna(method='ffill')                # fill these down
        asset_df = asset_df.fillna(method='ffill')                  # fill down asset holidays

        returns_df = tsc.calculate_returns(asset_df)
        tc = br.spot_tc_bp

        signal_cols = signal_df.columns.values
        returns_cols = returns_df.columns.values

        # label each P&L column "asset / signal" so the pairs stay identifiable
        pnl_cols = []
        for i in range(0, len(returns_cols)):
            pnl_cols.append(returns_cols[i] + " / " + signal_cols[i])

        # do we have a vol target for individual signals?
        if hasattr(br, 'signal_vol_adjust'):
            if br.signal_vol_adjust is True:
                leverage_df = self.calculate_leverage_factor(returns_df, br.signal_vol_target, br.signal_vol_max_leverage,
                                               br.signal_vol_periods, br.signal_vol_obs_in_year,
                                               br.signal_vol_rebalance_freq)
                signal_df = pandas.DataFrame(
                    signal_df.values * leverage_df.values, index = signal_df.index, columns = signal_df.columns)
                self._individual_leverage = leverage_df     # contains leverage of individual signal (before portfolio vol target)

        _pnl = tsc.calculate_signal_returns_with_tc_matrix(signal_df, returns_df, tc = tc)
        _pnl.columns = pnl_cols

        # portfolio is average of the underlying signals: should we sum them or average them?
        if hasattr(br, 'portfolio_combination'):
            if br.portfolio_combination == 'sum':
                portfolio = pandas.DataFrame(data = _pnl.sum(axis = 1), index = _pnl.index, columns = ['Portfolio'])
            elif br.portfolio_combination == 'mean':
                portfolio = pandas.DataFrame(data = _pnl.mean(axis = 1), index = _pnl.index, columns = ['Portfolio'])
        else:
            # default to an equally weighted average of the signals
            portfolio = pandas.DataFrame(data = _pnl.mean(axis = 1), index = _pnl.index, columns = ['Portfolio'])

        portfolio_leverage_df = pandas.DataFrame(data = numpy.ones(len(_pnl.index)), index = _pnl.index, columns = ['Portfolio'])

        # should we apply vol target on a portfolio level basis?
        if hasattr(br, 'portfolio_vol_adjust'):
            if br.portfolio_vol_adjust is True:
                portfolio, portfolio_leverage_df = self.calculate_vol_adjusted_returns(portfolio, br = br)

        self._portfolio = portfolio
        self._signal = signal_df                            # individual signals (before portfolio leverage)
        self._portfolio_leverage = portfolio_leverage_df    # leverage on portfolio

        # multiply portfolio leverage * individual signals to get final position signals
        length_cols = len(signal_df.columns)
        leverage_matrix = numpy.repeat(portfolio_leverage_df.values.flatten()[numpy.newaxis,:], length_cols, 0)

        # final portfolio signals (including signal & portfolio leverage)
        self._portfolio_signal = pandas.DataFrame(
            data = numpy.multiply(numpy.transpose(leverage_matrix), signal_df.values),
            index = signal_df.index, columns = signal_df.columns) / float(length_cols)

        self._pnl = _pnl                                    # individual signals P&L
        self._tsd_pnl = TimeSeriesDesc()
        self._tsd_pnl.calculate_ret_stats(self._pnl, br.ann_factor)

        self._portfolio.columns = ['Port']
        self._tsd_portfolio = TimeSeriesDesc()
        self._tsd_portfolio.calculate_ret_stats(self._portfolio, br.ann_factor)

        self._cumpnl = tsc.create_mult_index(self._pnl)     # individual signals cumulative P&L
        self._cumpnl.columns = pnl_cols

        self._cumportfolio = tsc.create_mult_index(self._portfolio)     # portfolio cumulative P&L
        self._cumportfolio.columns = ['Port']

    def calculate_vol_adjusted_index_from_prices(self, prices_df, br):
        """
        calculate_vol_adjusted_index_from_prices - Adjusts an index of prices for a vol target

        Parameters
        ----------
        br : BacktestRequest
            Parameters for the backtest specifying start date, finish data, transaction costs etc.

        prices_df : pandas.DataFrame
            Asset prices to be traded

        Returns
        -------
        pandas.DataFrame containing vol adjusted index
        """
        tsc = TimeSeriesCalcs()

        returns_df, leverage_df = self.calculate_vol_adjusted_returns(prices_df, br, returns = False)

        return tsc.create_mult_index(returns_df)

    def calculate_vol_adjusted_returns(self, returns_df, br, returns = True):
        """
        calculate_vol_adjusted_returns - Adjusts returns for a vol target

        Parameters
        ----------
        br : BacktestRequest
            Parameters for the backtest specifying start date, finish data, transaction costs etc.

        returns_df : pandas.DataFrame
            Asset returns to be traded (prices if returns=False)

        returns : boolean
            True if returns_df already contains returns, False if it contains prices

        Returns
        -------
        (pandas.DataFrame, pandas.DataFrame) of vol-adjusted returns and leverage
        """
        tsc = TimeSeriesCalcs()

        if not returns: returns_df = tsc.calculate_returns(returns_df)

        leverage_df = self.calculate_leverage_factor(returns_df,
                                                     br.portfolio_vol_target, br.portfolio_vol_max_leverage,
                                                     br.portfolio_vol_periods, br.portfolio_vol_obs_in_year,
                                                     br.portfolio_vol_rebalance_freq)

        vol_returns_df = tsc.calculate_signal_returns_with_tc_matrix(leverage_df, returns_df, tc = br.spot_tc_bp)
        vol_returns_df.columns = returns_df.columns

        return vol_returns_df, leverage_df

    def calculate_leverage_factor(self, returns_df, vol_target, vol_max_leverage, vol_periods = 60, vol_obs_in_year = 252,
                                  vol_rebalance_freq = 'BM', returns = True, period_shift = 0):
        """
        calculate_leverage_factor - Calculates the time series of leverage for a specified vol target

        Parameters
        ----------
        returns_df : DataFrame
            Asset returns

        vol_target : float
            vol target for assets

        vol_max_leverage : float
            maximum leverage allowed

        vol_periods : int
            number of periods to calculate volatility

        vol_obs_in_year : int
            number of observations in the year

        vol_rebalance_freq : str
            how often to rebalance

        returns : boolean
            is this returns time series or prices?

        period_shift : int
            should we delay the signal by a number of periods?

        Returns
        -------
        pandas.Dataframe
        """
        tsc = TimeSeriesCalcs()

        if not returns: returns_df = tsc.calculate_returns(returns_df)

        roll_vol_df = tsc.rolling_volatility(returns_df,
                                             periods = vol_periods, obs_in_year = vol_obs_in_year).shift(period_shift)

        # calculate the leverage as function of vol target (with max lev constraint)
        lev_df = vol_target / roll_vol_df
        lev_df[lev_df > vol_max_leverage] = vol_max_leverage

        # only allow the leverage change at resampling frequency (eg. monthly 'BM')
        # NOTE(review): relies on pre-0.18 pandas where resample() returns the
        # downsampled values directly; newer pandas returns a Resampler - confirm
        # the pinned pandas version before upgrading.
        lev_df = lev_df.resample(vol_rebalance_freq)

        returns_df, lev_df = returns_df.align(lev_df, join='left', axis = 0)
        lev_df = lev_df.fillna(method='ffill')

        return lev_df

    def get_backtest_output(self):
        """Placeholder for combined backtest output; not yet implemented."""
        return

    def get_pnl(self):
        """
        get_pnl - Gets P&L returns

        Returns
        -------
        pandas.Dataframe
        """
        return self._pnl

    def get_pnl_desc(self):
        """
        get_pnl_desc - Gets P&L return statistics in a string format

        Returns
        -------
        str
        """
        # Fixed: previously read self._tsd_signals, which is never assigned
        # (AttributeError); the per-signal stats object is self._tsd_pnl.
        return self._tsd_pnl.summary()

    def get_pnl_tsd(self):
        """
        get_pnl_tsd - Gets P&L return statistics of individual strategies as class to be queried

        Returns
        -------
        TimeSeriesDesc
        """
        return self._tsd_pnl

    def get_cumpnl(self):
        """
        get_cumpnl - Gets P&L as a cumulative time series of individual assets

        Returns
        -------
        pandas.DataFrame
        """
        return self._cumpnl

    def get_cumportfolio(self):
        """
        get_cumportfolio - Gets P&L as a cumulative time series of portfolio

        Returns
        -------
        pandas.DataFrame
        """
        return self._cumportfolio

    def get_portfolio_pnl_desc(self):
        """
        get_portfolio_pnl_desc - Gets P&L return statistics of portfolio as string

        Returns
        -------
        str
        """
        return self._tsd_portfolio.summary()

    def get_portfolio_pnl_tsd(self):
        """
        get_portfolio_pnl_tsd - Gets P&L return statistics of portfolio as class to be queried

        Returns
        -------
        TimeSeriesDesc
        """
        return self._tsd_portfolio

    def get_individual_leverage(self):
        """
        get_individual_leverage - Gets leverage for each asset historically

        Returns
        -------
        pandas.DataFrame
        """
        return self._individual_leverage

    def get_porfolio_leverage(self):
        """
        get_porfolio_leverage - Gets the leverage for the portfolio

        (Method name misspelling kept for backward compatibility.)

        Returns
        -------
        pandas.DataFrame
        """
        return self._portfolio_leverage

    def get_porfolio_signal(self):
        """
        get_porfolio_signal - Gets the signals (with individual leverage & portfolio leverage) for each asset, which
        equates to what we would trade in practice

        (Method name misspelling kept for backward compatibility.)

        Returns
        -------
        DataFrame
        """
        return self._portfolio_signal

    def get_signal(self):
        """
        get_signal - Gets the signals (with individual leverage, but excluding portfolio leverage) for each asset

        Returns
        -------
        pandas.DataFrame
        """
        return self._signal
if __name__ == '__main__':
    # No standalone demo here; see cashbacktest_examples for usage examples.
    pass
|
|
"""Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS,
)
from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
# Origins treated as secure when scanning package locations; "*" entries are
# wildcards and a None port matches any port.
SECURE_ORIGINS = [
    # protocol, hostname, port
    # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
    ("https", "*", "*"),
    ("*", "localhost", "*"),
    ("*", "127.0.0.0/8", "*"),
    ("*", "::1/128", "*"),
    ("file", "*", None),
    # ssh is always secure.
    ("ssh", "*", "*"),
]
# Module-level logger shared by the finder machinery below.
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
    """A (project, parsed version, location) triple found for installation.

    Candidates hash and order by their key tuple so they can be
    de-duplicated and sorted to pick the best one.
    """

    def __init__(self, project, version, location):
        self.project = project
        self.version = parse_version(version)
        self.location = location
        self._key = (self.project, self.version, self.location)

    def __repr__(self):
        return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
            self.project, self.version, self.location,
        )

    def __hash__(self):
        return hash(self._key)

    def _compare(self, other, method):
        # All rich comparisons delegate here; only candidates are comparable.
        if not isinstance(other, InstallationCandidate):
            return NotImplemented
        return method(self._key, other._key)

    def __lt__(self, other):
        return self._compare(other, lambda a, b: a < b)

    def __le__(self, other):
        return self._compare(other, lambda a, b: a <= b)

    def __eq__(self, other):
        return self._compare(other, lambda a, b: a == b)

    def __ne__(self, other):
        return self._compare(other, lambda a, b: a != b)

    def __ge__(self, other):
        return self._compare(other, lambda a, b: a >= b)

    def __gt__(self, other):
        return self._compare(other, lambda a, b: a > b)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
    def __init__(self, find_links, index_urls, allow_all_prereleases=False,
                 trusted_hosts=None, process_dependency_links=False,
                 session=None, format_control=None):
        """Create a PackageFinder.

        :param find_links: locations (paths or URLs) searched directly for
            archives; entries starting with ``~`` are expanded when the
            expanded path exists locally.
        :param index_urls: base URLs of package indexes to consult.
        :param allow_all_prereleases: accept pre-release versions for every
            project.
        :param trusted_hosts: hosts that will not trigger insecure-transport
            warnings.
        :param process_dependency_links: honour (deprecated)
            dependency_links metadata.
        :param session: required; the requests session used for all network
            access.
        :param format_control: A FormatControl object or None. Used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        """
        if session is None:
            raise TypeError(
                "PackageFinder() missing 1 required keyword argument: "
                "'session'"
            )
        # Build find_links. If an argument starts with ~, it may be
        # a local file relative to a home directory. So try normalizing
        # it and if it exists, use the normalized version.
        # This is deliberately conservative - it might be fine just to
        # blindly normalize anything starting with a ~...
        self.find_links = []
        for link in find_links:
            if link.startswith('~'):
                new_link = normalize_path(link)
                if os.path.exists(new_link):
                    link = new_link
            self.find_links.append(link)
        self.index_urls = index_urls
        self.dependency_links = []
        # These are boring links that have already been logged somehow:
        self.logged_links = set()
        self.format_control = format_control or FormatControl(set(), set())
        # Domains that we won't emit warnings for when not using HTTPS
        self.secure_origins = [
            ("*", host, "*")
            for host in (trusted_hosts if trusted_hosts else [])
        ]
        # Do we want to allow _all_ pre-releases?
        self.allow_all_prereleases = allow_all_prereleases
        # Do we process dependency links?
        self.process_dependency_links = process_dependency_links
        # The Session we'll use to make requests
        self.session = session
        # If we don't have TLS enabled, then WARN if anyplace we're looking
        # relies on TLS.
        if not HAS_TLS:
            for link in itertools.chain(self.index_urls, self.find_links):
                parsed = urllib_parse.urlparse(link)
                if parsed.scheme == "https":
                    logger.warning(
                        "pip is configured with locations that require "
                        "TLS/SSL, however the ssl module in Python is not "
                        "available."
                    )
                    break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip9Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
    def _validate_secure_origin(self, logger, location):
        """Return True if *location* has a secure origin; warn and return
        False otherwise.

        An origin is secure when its (protocol, host, port) matches an entry
        in SECURE_ORIGINS or one added via --trusted-host.
        """
        # Determine if this url used a secure transport mechanism
        parsed = urllib_parse.urlparse(str(location))
        origin = (parsed.scheme, parsed.hostname, parsed.port)
        # The protocol to use to see if the protocol matches.
        # Don't count the repository type as part of the protocol: in
        # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
        # the last scheme.)
        protocol = origin[0].rsplit('+', 1)[-1]
        # Determine if our origin is a secure origin by looking through our
        # hardcoded list of secure origins, as well as any additional ones
        # configured on this PackageFinder instance.
        for secure_origin in (SECURE_ORIGINS + self.secure_origins):
            if protocol != secure_origin[0] and secure_origin[0] != "*":
                continue
            try:
                # We need to do this decode dance to ensure that we have a
                # unicode object, even on Python 2.x.
                addr = ipaddress.ip_address(
                    origin[1]
                    if (
                        isinstance(origin[1], six.text_type) or
                        origin[1] is None
                    )
                    else origin[1].decode("utf8")
                )
                network = ipaddress.ip_network(
                    secure_origin[1]
                    if isinstance(secure_origin[1], six.text_type)
                    else secure_origin[1].decode("utf8")
                )
            except ValueError:
                # We don't have both a valid address or a valid network, so
                # we'll check this origin against hostnames.
                if (origin[1] and
                        origin[1].lower() != secure_origin[1].lower() and
                        secure_origin[1] != "*"):
                    continue
            else:
                # We have a valid address and network, so see if the address
                # is contained within the network.
                if addr not in network:
                    continue
            # Check to see if the port patches
            if (origin[2] != secure_origin[2] and
                    secure_origin[2] != "*" and
                    secure_origin[2] is not None):
                continue
            # If we've gotten here, then this origin matches the current
            # secure origin and we should return True
            return True
        # If we've gotten to this point, then the origin isn't secure and we
        # will not accept it as a valid location to search. We will however
        # log a warning that we are ignoring it.
        logger.warning(
            "The repository located at %s is not a trusted or secure host and "
            "is being ignored. If this repository is available via HTTPS it "
            "is recommended to use HTTPS instead, otherwise you may silence "
            "this warning and allow it anyways with '--trusted-host %s'.",
            parsed.hostname,
            parsed.hostname,
        )
        return False
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(
url,
urllib_parse.quote(canonicalize_name(project_name)))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
    def find_all_candidates(self, project_name):
        """Find all available InstallationCandidate for project_name

        This checks index_urls, find_links and dependency_links.
        All versions found are returned as an InstallationCandidate list.

        See _link_package_versions for details on which files are accepted
        """
        # Split every known location into local archive files vs. pages.
        index_locations = self._get_index_urls_locations(project_name)
        index_file_loc, index_url_loc = self._sort_locations(index_locations)
        fl_file_loc, fl_url_loc = self._sort_locations(
            self.find_links, expand_dir=True)
        dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
        file_locations = (
            Link(url) for url in itertools.chain(
                index_file_loc, fl_file_loc, dep_file_loc)
        )
        # We trust every url that the user has given us whether it was given
        # via --index-url or --find-links
        # We explicitly do not trust links that came from dependency_links
        # We want to filter out any thing which does not have a secure origin.
        url_locations = [
            link for link in itertools.chain(
                (Link(url) for url in index_url_loc),
                (Link(url) for url in fl_url_loc),
                (Link(url) for url in dep_url_loc),
            )
            if self._validate_secure_origin(logger, link)
        ]
        logger.debug('%d location(s) to search for versions of %s:',
                     len(url_locations), project_name)
        for location in url_locations:
            logger.debug('* %s', location)
        # Evaluate candidates from each source separately so the explicit
        # priority ordering at the bottom stays visible.
        canonical_name = canonicalize_name(project_name)
        formats = fmt_ctl_formats(self.format_control, canonical_name)
        search = Search(project_name, canonical_name, formats)
        find_links_versions = self._package_versions(
            # We trust every directly linked archive in find_links
            (Link(url, '-f') for url in self.find_links),
            search
        )
        page_versions = []
        for page in self._get_pages(url_locations, project_name):
            logger.debug('Analyzing links from page %s', page.url)
            with indent_log():
                page_versions.extend(
                    self._package_versions(page.links, search)
                )
        dependency_versions = self._package_versions(
            (Link(url) for url in self.dependency_links), search
        )
        if dependency_versions:
            logger.debug(
                'dependency_links found: %s',
                ', '.join([
                    version.location.url for version in dependency_versions
                ])
            )
        file_versions = self._package_versions(file_locations, search)
        if file_versions:
            file_versions.sort(reverse=True)
            logger.debug(
                'Local files found: %s',
                ', '.join([
                    url_to_path(candidate.location.url)
                    for candidate in file_versions
                ])
            )
        # This is an intentional priority ordering
        return (
            file_versions + find_links_versions + page_versions +
            dependency_versions
        )
    def find_requirement(self, req, upgrade):
        """Try to find a Link matching req

        Expects req, an InstallRequirement and upgrade, a boolean
        Returns a Link if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        all_candidates = self.find_all_candidates(req.name)
        # Filter out anything which doesn't match our specifier
        compatible_versions = set(
            req.specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                [str(c.version) for c in all_candidates],
                prereleases=(
                    self.allow_all_prereleases
                    if self.allow_all_prereleases else None
                ),
            )
        )
        applicable_candidates = [
            # Again, converting to str to deal with debundling.
            c for c in all_candidates if str(c.version) in compatible_versions
        ]
        # Pick the best candidate by version (wheels preferred over sdists).
        if applicable_candidates:
            best_candidate = max(applicable_candidates,
                                 key=self._candidate_sort_key)
        else:
            best_candidate = None
        if req.satisfied_by is not None:
            installed_version = parse_version(req.satisfied_by.version)
        else:
            installed_version = None
        # Nothing installed and nothing found: hard failure.
        if installed_version is None and best_candidate is None:
            logger.critical(
                'Could not find a version that satisfies the requirement %s '
                '(from versions: %s)',
                req,
                ', '.join(
                    sorted(
                        set(str(c.version) for c in all_candidates),
                        key=parse_version,
                    )
                )
            )
            raise DistributionNotFound(
                'No matching distribution found for %s' % req
            )
        best_installed = False
        if installed_version and (
                best_candidate is None or
                best_candidate.version <= installed_version):
            best_installed = True
        if not upgrade and installed_version is not None:
            if best_installed:
                logger.debug(
                    'Existing installed version (%s) is most up-to-date and '
                    'satisfies requirement',
                    installed_version,
                )
            else:
                logger.debug(
                    'Existing installed version (%s) satisfies requirement '
                    '(most up-to-date version is %s)',
                    installed_version,
                    best_candidate.version,
                )
            return None
        if best_installed:
            # We have an existing version, and its the best version
            logger.debug(
                'Installed version (%s) is most up-to-date (past versions: '
                '%s)',
                installed_version,
                ', '.join(sorted(compatible_versions, key=parse_version)) or
                "none",
            )
            raise BestVersionAlreadyInstalled
        logger.debug(
            'Using version %s (newest of versions: %s)',
            best_candidate.version,
            ', '.join(sorted(compatible_versions, key=parse_version))
        )
        return best_candidate.location
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
    """Return an InstallationCandidate or None.

    Applies a sequence of ordered skip rules (file-ness, archive format,
    binary/source policy, wheel validity/compatibility, project name,
    -pyX.Y tag) and logs the first reason a link is rejected.
    """
    version = None
    if link.egg_fragment:
        # An explicit #egg= fragment names the project and version directly.
        egg_info = link.egg_fragment
        ext = link.ext
    else:
        egg_info, ext = link.splitext()
        if not ext:
            self._log_skipped_link(link, 'not a file')
            return
        if ext not in SUPPORTED_EXTENSIONS:
            self._log_skipped_link(
                link, 'unsupported archive format: %s' % ext)
            return
        if "binary" not in search.formats and ext == wheel_ext:
            self._log_skipped_link(
                link, 'No binaries permitted for %s' % search.supplied)
            return
        if "macosx10" in link.path and ext == '.zip':
            # Special-cases zips whose path mentions macosx10; see the log
            # message for the (terse) historical rationale.
            self._log_skipped_link(link, 'macosx10 one')
            return
        if ext == wheel_ext:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                self._log_skipped_link(link, 'invalid wheel filename')
                return
            if canonicalize_name(wheel.name) != search.canonical:
                self._log_skipped_link(
                    link, 'wrong project name (not %s)' % search.supplied)
                return
            if not wheel.supported():
                self._log_skipped_link(
                    link, 'it is not compatible with this Python')
                return
            version = wheel.version
    # This should be up by the search.ok_binary check, but see issue 2700.
    if "source" not in search.formats and ext != wheel_ext:
        self._log_skipped_link(
            link, 'No sources permitted for %s' % search.supplied)
        return
    if not version:
        # Non-wheel links: extract the version from the egg info string.
        version = egg_info_matches(egg_info, search.supplied, link)
    if version is None:
        self._log_skipped_link(
            link, 'wrong project name (not %s)' % search.supplied)
        return
    match = self._py_version_re.search(version)
    if match:
        # Strip a trailing -pyX.Y marker and require it to match the
        # running interpreter.
        # NOTE(review): sys.version[:3] breaks for two-digit minor
        # versions (e.g. "3.10" -> "3.1") - confirm target interpreters.
        version = version[:match.start()]
        py_version = match.group(1)
        if py_version != sys.version[:3]:
            self._log_skipped_link(
                link, 'Python version is incorrect')
            return
    logger.debug('Found link %s, version: %s', link, version)
    return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
    """Fetch and parse *link* as an HTMLPage using this finder's session."""
    page = HTMLPage.get_page(link, session=self.session)
    return page
def egg_info_matches(
        egg_info, search_name, link,
        _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
    """Pull the version part out of a string.

    :param egg_info: The string to parse. E.g. foo-2.1
    :param search_name: The name of the package this belongs to. None to
        infer the name. Note that this cannot unambiguously parse strings
        like foo-2-2 which might be foo, 2-2 or foo-2, 2.
    :param link: The link the string came from, for logging on failure.
    """
    match = _egg_info_re.search(egg_info)
    if match is None:
        logger.debug('Could not parse version from link: %s', link)
        return None
    full_match = match.group(0)
    if search_name is None:
        # No name to anchor on: return everything from the first dash.
        return full_match[full_match.index('-'):]
    # Mirror the "safe" name pkg_resources creates: lowercase, underscores
    # replaced by dashes; name and version must be dash-separated.
    normalized = full_match.lower().replace('_', '-')
    prefix = search_name.lower() + "-"
    if normalized.startswith(prefix):
        return full_match[len(prefix):]
    return None
class HTMLPage(object):
    """Represents one page, along with its URL"""

    def __init__(self, content, url, headers=None):
        # Determine if we have any encoding information in our headers
        encoding = None
        if headers and "Content-Type" in headers:
            content_type, params = cgi.parse_header(headers["Content-Type"])
            if "charset" in params:
                encoding = params['charset']
        self.content = content
        # html5lib tolerates malformed markup (browser-like parsing).
        self.parsed = html5lib.parse(
            self.content,
            encoding=encoding,
            namespaceHTMLElements=False,
        )
        self.url = url
        self.headers = headers

    def __str__(self):
        return self.url

    @classmethod
    def get_page(cls, link, skip_archives=True, session=None):
        """Fetch *link* and return an HTMLPage, or None when the URL cannot
        or should not be treated as an HTML index page.

        :param link: the Link whose URL is fetched (fragment stripped).
        :param skip_archives: when True, HEAD-check URLs whose filename
            looks like an archive and skip them unless the server reports
            text/html.
        :param session: required requests session; omitting it raises
            TypeError.
        """
        if session is None:
            raise TypeError(
                "get_page() missing 1 required keyword argument: 'session'"
            )
        url = link.url
        url = url.split('#', 1)[0]

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %s URL %s', scheme, link)
                return None

        try:
            if skip_archives:
                filename = link.filename
                for bad_ext in ARCHIVE_EXTENSIONS:
                    if filename.endswith(bad_ext):
                        # Ask the server before downloading; only proceed
                        # if it claims the archive-looking URL is HTML.
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s',
                                link,
                                content_type,
                            )
                            return

            logger.debug('Getting page %s', url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urllib_parse.urlparse(url)
            if (scheme == 'file' and
                    os.path.isdir(urllib_request.url2pathname(path))):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urllib_parse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s', url)

            resp = session.get(
                url,
                headers={
                    "Accept": "text/html",
                    "Cache-Control": "max-age=600",
                },
            )
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            # something that looks like an archive. However that is not a
            # requirement of an url. Unless we issue a HEAD request on every
            # url we cannot know ahead of time for sure if something is HTML
            # or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s',
                    link,
                    content_type,
                )
                return

            inst = cls(resp.content, resp.url, resp.headers)
        except requests.HTTPError as exc:
            cls._handle_fail(link, exc, url)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(link, reason, url, meth=logger.info)
        except requests.ConnectionError as exc:
            cls._handle_fail(link, "connection error: %s" % exc, url)
        except requests.Timeout:
            cls._handle_fail(link, "timed out", url)
        except requests.TooManyRedirects as exc:
            cls._handle_fail(
                link,
                "Error: %s" % exc,
                url
            )
        except Exception as e:
            reason = ("There was an unknown error: %s" % e)
            cls._handle_fail(
                link,
                reason,
                url
            )
        else:
            # Only reached when no exception occurred above.
            return inst

    @staticmethod
    def _handle_fail(link, reason, url, meth=None):
        # Fetch failures are soft: log at the given level and move on.
        if meth is None:
            meth = logger.debug

        meth("Could not fetch URL %s: %s - skipping", link, reason)

    @staticmethod
    def _get_content_type(url, session):
        """Get the Content-Type of the given url, using a HEAD request"""
        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
        if scheme not in ('http', 'https'):
            # FIXME: some warning or something?
            # assertion error?
            return ''

        resp = session.head(url, allow_redirects=True)
        resp.raise_for_status()

        return resp.headers.get("Content-Type", "")

    @cached_property
    def base_url(self):
        # Base for resolving relative hrefs: the page's first <base href>
        # when present, otherwise the page URL itself.
        bases = [
            x for x in self.parsed.findall(".//base")
            if x.get("href") is not None
        ]
        if bases and bases[0].get("href"):
            return bases[0].get("href")
        else:
            return self.url

    @property
    def links(self):
        """Yields all links in the page"""
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(
                    urllib_parse.urljoin(self.base_url, href)
                )
                yield Link(url, self)

    # Characters that must be percent-encoded in a link.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        # NOTE(review): '%2x' space-pads ordinals < 0x10 (e.g. '% 9'),
        # producing an invalid escape - confirm such characters cannot
        # reach this point.
        return self._clean_re.sub(
            lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
    """A URL pointing at a project page, distribution file, or VCS
    location, with helpers for picking the URL apart.

    Links compare, order, and hash purely by their URL string.
    """

    def __init__(self, url, comes_from=None):
        # url can be a UNC windows share
        if url.startswith('\\\\'):
            url = path_to_url(url)

        self.url = url
        # Where this link was discovered (e.g. the HTMLPage), if known.
        self.comes_from = comes_from

    def __str__(self):
        if self.comes_from:
            return '%s (from %s)' % (self.url, self.comes_from)
        else:
            return str(self.url)

    def __repr__(self):
        return '<Link %s>' % self

    def __eq__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url == other.url

    def __ne__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url != other.url

    def __lt__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url < other.url

    def __le__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url <= other.url

    def __gt__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url > other.url

    def __ge__(self, other):
        if not isinstance(other, Link):
            return NotImplemented
        return self.url >= other.url

    def __hash__(self):
        return hash(self.url)

    @property
    def filename(self):
        # Last path segment (netloc as a fallback), URL-unquoted.
        _, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
        name = posixpath.basename(path.rstrip('/')) or netloc
        name = urllib_parse.unquote(name)
        assert name, ('URL %r produced no filename' % self.url)
        return name

    @property
    def scheme(self):
        return urllib_parse.urlsplit(self.url)[0]

    @property
    def netloc(self):
        return urllib_parse.urlsplit(self.url)[1]

    @property
    def path(self):
        return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])

    def splitext(self):
        # Uses the module-level splitext helper on the URL's basename.
        return splitext(posixpath.basename(self.path.rstrip('/')))

    @property
    def ext(self):
        return self.splitext()[1]

    @property
    def url_without_fragment(self):
        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
        return urllib_parse.urlunsplit((scheme, netloc, path, query, None))

    _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')

    @property
    def egg_fragment(self):
        """Value of a #egg=... fragment, or None."""
        match = self._egg_fragment_re.search(self.url)
        if not match:
            return None
        return match.group(1)

    _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')

    @property
    def subdirectory_fragment(self):
        """Value of a #subdirectory=... fragment, or None."""
        match = self._subdirectory_fragment_re.search(self.url)
        if not match:
            return None
        return match.group(1)

    _hash_re = re.compile(
        r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
    )

    @property
    def hash(self):
        """Hex digest from a <hashname>=<hex> URL fragment, or None."""
        match = self._hash_re.search(self.url)
        if match:
            return match.group(2)
        return None

    @property
    def hash_name(self):
        """Hash algorithm name from a <hashname>=<hex> URL fragment, or None."""
        match = self._hash_re.search(self.url)
        if match:
            return match.group(1)
        return None

    @property
    def show_url(self):
        # URL basename with query string and fragment stripped.
        return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])

    @property
    def is_wheel(self):
        return self.ext == wheel_ext

    @property
    def is_artifact(self):
        """
        Determines if this points to an actual artifact (e.g. a tarball) or if
        it points to an "abstract" thing like a path or a VCS location.
        """
        from pip.vcs import vcs

        if self.scheme in vcs.all_schemes:
            return False

        return True
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
    """Apply a comma-separated format-control *value* to the *target* set,
    keeping it mutually exclusive with *other*.

    ':all:' wipes both sets and claims everything for *target*; a later
    ':none:' resets *target*; plain names are canonicalized, removed from
    *other*, and added to *target*.
    """
    names = value.split(',')
    while ':all:' in names:
        other.clear()
        target.clear()
        target.add(':all:')
        # Drop everything up to and including the first ':all:'.
        del names[:names.index(':all:') + 1]
        if ':none:' not in names:
            # Without a none, we want to discard everything as :all: covers it
            return
    for name in names:
        if name == ':none:':
            target.clear()
        else:
            name = canonicalize_name(name)
            other.discard(name)
            target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
    """Return the frozenset of formats ('binary'/'source') permitted for a
    package under the given FormatControl.

    An exact name match takes precedence over a ':all:' wildcard.
    """
    allowed = {"binary", "source"}
    if canonical_name in fmt_ctl.only_binary:
        allowed.discard('source')
    elif canonical_name in fmt_ctl.no_binary:
        allowed.discard('binary')
    elif ':all:' in fmt_ctl.only_binary:
        allowed.discard('source')
    elif ':all:' in fmt_ctl.no_binary:
        allowed.discard('binary')
    return frozenset(allowed)
def fmt_ctl_no_binary(fmt_ctl):
    # Equivalent to "--no-binary :all:": disallow binaries for every package.
    fmt_ctl_handle_mutual_exclude(
        ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
    # Legacy --no-use-wheel flag: same effect as --no-binary :all:, plus a
    # deprecation warning attributed to the caller (stacklevel=2).
    fmt_ctl_no_binary(fmt_ctl)
    warnings.warn(
        '--no-use-wheel is deprecated and will be removed in the future. '
        ' Please use --no-binary :all: instead.', RemovedInPip10Warning,
        stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
|
|
# -*- coding: utf-8 -*-
# 'version': '0.3'
#
# Copyright (c) 2017, Stephen B, Hope, All rights reserved.
#
# CommAI-env Copyright (c) 2016-present, Facebook, Inc., All rights reserved.
# Round1 Copyright (c) 2017-present, GoodAI All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE_CHALLENGE file in the root directory of this source tree.
import random
import unittest
from src.fsa import build_automaton
class TestFSA(unittest.TestCase):
    """Black-box tests for the finite-state automata returned by
    src.fsa.build_automaton.

    Automata are built from a space-separated pattern string and a mode
    ("and"/"or"); recognition is exercised via is_string_correct and the
    automata's own string generators.
    """

    def assertGeneratedStrings(self, obj):
        """Check that the automaton's generators agree with its recognizer:
        generated "correct" strings are accepted and generated "wrong"
        strings are rejected, over a spread of random lengths.

        :param obj: automaton under test
        """
        for i in range(100):
            self.assertTrue(obj.is_string_correct(obj.get_correct_string(random.randint(1, 20))))
            self.assertFalse(obj.is_string_correct(obj.get_wrong_string(random.randint(1, 20), 0)))

    def test_fsa_1_1(self):
        """Single block "C", 'and' mode: accepts non-empty runs of C only."""
        obj = build_automaton("C", "and")
        self.assertTrue(obj.is_string_correct("C"))
        self.assertTrue(obj.is_string_correct("CCC"))
        self.assertTrue(obj.is_string_correct("CCCCCCCCC"))
        self.assertFalse(obj.is_string_correct("CCY"))
        self.assertFalse(obj.is_string_correct("ABGD"))
        self.assertFalse(obj.is_string_correct(""))
        self.assertGeneratedStrings(obj)

    def test_fsa_1_2(self):
        """Single block "C", 'or' mode: like 'and' but the empty string is accepted."""
        obj = build_automaton("C", "or")
        self.assertTrue(obj.is_string_correct("C"))
        self.assertTrue(obj.is_string_correct("CCC"))
        self.assertTrue(obj.is_string_correct("CCCCCCCCC"))
        self.assertFalse(obj.is_string_correct("CCY"))
        self.assertFalse(obj.is_string_correct("ABGD"))
        self.assertTrue(obj.is_string_correct(""))
        self.assertGeneratedStrings(obj)

    def test_fsa_1_3(self):
        """Two-symbol block "AB", 'and' mode: accepts whole repetitions of AB."""
        obj = build_automaton("AB", "and")
        self.assertTrue(obj.is_string_correct("AB"))
        self.assertTrue(obj.is_string_correct("ABABAB"))
        self.assertFalse(obj.is_string_correct("ABABABAA"))
        self.assertFalse(obj.is_string_correct("AXBGD"))
        self.assertGeneratedStrings(obj)

    def test_fsa_1_4(self):
        """Three-symbol block "FJG", 'and' mode: accepts whole repetitions only."""
        obj = build_automaton("FJG", "and")
        self.assertTrue(obj.is_string_correct("FJG"))
        self.assertTrue(obj.is_string_correct("FJGFJGFJG"))
        self.assertTrue(obj.is_string_correct("FJGFJGFJGFJGFJGFJG"))
        self.assertFalse(obj.is_string_correct("FFF"))
        self.assertFalse(obj.is_string_correct("EFGTR"))
        self.assertFalse(obj.is_string_correct(""))
        self.assertGeneratedStrings(obj)

    def test_fsa_1_5(self):
        """Block "XYX" with internal overlap, 'or' mode: overlapping repeats and the empty string are accepted."""
        obj = build_automaton("XYX", "or")
        self.assertTrue(obj.is_string_correct("XYX"))
        self.assertTrue(obj.is_string_correct("XYXXYXXYX"))
        self.assertTrue(obj.is_string_correct("XYXYXYXXYX"))
        self.assertFalse(obj.is_string_correct("XYXY"))
        self.assertFalse(obj.is_string_correct("OUAJHFLJAH"))
        self.assertTrue(obj.is_string_correct(""))
        self.assertGeneratedStrings(obj)

    def test_fsa_1_6(self):
        """Block "XX", 'and' mode: runs of X - both even and odd lengths should be accepted."""
        obj = build_automaton("XX", "and")
        self.assertTrue(obj.is_string_correct("XXX"))
        self.assertTrue(obj.is_string_correct("XXXX"))
        self.assertFalse(obj.is_string_correct("XXY"))
        self.assertFalse(obj.is_string_correct("ABGD"))
        self.assertGeneratedStrings(obj)

    def test_fsa_2_1(self):
        """'anything' pattern, 'or' mode: every string is accepted.

        assertGeneratedStrings is skipped - a wrong string cannot be
        generated for an all-accepting automaton.
        """
        obj = build_automaton("anything", "or")
        self.assertTrue(obj.is_string_correct("X"))
        self.assertTrue(obj.is_string_correct("Y"))
        self.assertTrue(obj.is_string_correct("ABC"))
        self.assertTrue(obj.is_string_correct("XYYYYYABCYXXXYXYXYYYYABC"))
        self.assertTrue(obj.is_string_correct("XXYZABC"))
        self.assertTrue(obj.is_string_correct("ABCGDTRW"))

    def test_fsa_2_2(self):
        """Two alternative blocks "AB CD", 'or' mode: any concatenation of the blocks."""
        obj = build_automaton("AB CD", "or")
        self.assertTrue(obj.is_string_correct("AB"))
        self.assertTrue(obj.is_string_correct("CD"))
        self.assertTrue(obj.is_string_correct("ABABAB"))
        self.assertFalse(obj.is_string_correct("CDA"))
        self.assertFalse(obj.is_string_correct("ABGDTRW"))
        self.assertGeneratedStrings(obj)

    def test_fsa_2_3(self):
        """Three alternative blocks of mixed length, 'or' mode."""
        obj = build_automaton("FAB GG MIL", "or")
        self.assertTrue(obj.is_string_correct("FAB"))
        self.assertTrue(obj.is_string_correct("GG"))
        self.assertTrue(obj.is_string_correct("MIL"))
        self.assertTrue(obj.is_string_correct("FABFAB"))
        self.assertTrue(obj.is_string_correct("FABGGGMIL"))
        self.assertFalse(obj.is_string_correct("FABGGGMILFA"))
        self.assertFalse(obj.is_string_correct("ABCGDTRW"))
        self.assertGeneratedStrings(obj)

    def test_fsa_2_4(self):
        """Two single-symbol alternatives "X Y", 'or' mode."""
        obj = build_automaton("X Y", "or")
        self.assertTrue(obj.is_string_correct("X"))
        self.assertTrue(obj.is_string_correct("Y"))
        self.assertTrue(obj.is_string_correct("XYYYYYYXXXYXYXYYYY"))
        self.assertFalse(obj.is_string_correct("XXYZ"))
        self.assertFalse(obj.is_string_correct("ABGDTRW"))
        self.assertGeneratedStrings(obj)

    def test_fsa_2_5(self):
        """Mixed-length alternatives "X Y ABC", 'or' mode."""
        obj = build_automaton("X Y ABC", "or")
        self.assertTrue(obj.is_string_correct("X"))
        self.assertTrue(obj.is_string_correct("Y"))
        self.assertTrue(obj.is_string_correct("ABC"))
        self.assertTrue(obj.is_string_correct("XYYYYYABCYXXXYXYXYYYYABC"))
        self.assertFalse(obj.is_string_correct("XXYZABC"))
        self.assertFalse(obj.is_string_correct("ABCGDTRW"))
        self.assertGeneratedStrings(obj)

    def test_fsa_2_6(self):
        """Overlapping alternatives "C CAB ABC", 'or' mode: segmentation may be ambiguous."""
        obj = build_automaton("C CAB ABC", "or")
        self.assertTrue(obj.is_string_correct("CABCAB"))
        self.assertTrue(obj.is_string_correct("ABCABC"))
        self.assertTrue(obj.is_string_correct("ABCAB"))
        self.assertTrue(obj.is_string_correct("CABCABCCCC"))
        self.assertFalse(obj.is_string_correct("ABCABCA"))
        self.assertFalse(obj.is_string_correct("ABCGDTRW"))
        self.assertGeneratedStrings(obj)

    def test_fsa_2_7(self):
        """Overlapping alternatives "ZJA J Y", 'or' mode."""
        obj = build_automaton("ZJA J Y", "or")
        self.assertTrue(obj.is_string_correct("JYJYJ"))
        self.assertTrue(obj.is_string_correct("JJJJJJJJ"))
        self.assertTrue(obj.is_string_correct("YZJAZJA"))
        self.assertTrue(obj.is_string_correct("ZJAJY"))
        self.assertFalse(obj.is_string_correct("YJZJ"))
        self.assertFalse(obj.is_string_correct("ZJAJD"))
        self.assertGeneratedStrings(obj)

    def test_fsa_3_1(self):
        """'and' mode over "AB CF": every listed block must occur at least once."""
        obj = build_automaton("AB CF", "and")
        self.assertTrue(obj.is_string_correct("ABABABCF"))
        self.assertTrue(obj.is_string_correct("ABCFABCFCF"))
        self.assertTrue(obj.is_string_correct("CFCFCFAB"))
        self.assertFalse(obj.is_string_correct("CFCF"))
        self.assertFalse(obj.is_string_correct("ABABABAB"))
        self.assertFalse(obj.is_string_correct("AB"))
        self.assertFalse(obj.is_string_correct("ABCAFAB"))
        self.assertFalse(obj.is_string_correct("AOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_3_2(self):
        """'and' mode over three blocks "HL RM BT"."""
        obj = build_automaton("HL RM BT", "and")
        self.assertTrue(obj.is_string_correct("RMBTBTHLHLBT"))
        self.assertTrue(obj.is_string_correct("HLRMBT"))
        self.assertTrue(obj.is_string_correct("BTRMHLHL"))
        self.assertFalse(obj.is_string_correct("BTRMHLHLL"))
        self.assertFalse(obj.is_string_correct("BTBTRMRM"))
        self.assertFalse(obj.is_string_correct("HL"))
        self.assertFalse(obj.is_string_correct("HLRMBBT"))
        self.assertFalse(obj.is_string_correct("AOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_3_3(self):
        """'and' mode over overlapping blocks "GLE EA ABC"."""
        obj = build_automaton("GLE EA ABC", "and")
        self.assertTrue(obj.is_string_correct("GLEABCEA"))
        self.assertTrue(obj.is_string_correct("GLEABC"))
        self.assertTrue(obj.is_string_correct("GLEABCGLEA"))
        self.assertFalse(obj.is_string_correct("GLEEA"))
        self.assertFalse(obj.is_string_correct("GLEABCA"))
        self.assertFalse(obj.is_string_correct("ABCABCABCGLE"))
        self.assertFalse(obj.is_string_correct("AOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_3_4(self):
        """'and' mode with 'anything': arbitrary text allowed as long as "AB" occurs."""
        obj = build_automaton("AB anything", "and")
        self.assertTrue(obj.is_string_correct("FKGABJJKJKSD"))
        self.assertTrue(obj.is_string_correct("GLEABC"))
        self.assertTrue(obj.is_string_correct("GLEABCGLEA"))
        self.assertTrue(obj.is_string_correct("ABABAB"))
        self.assertTrue(obj.is_string_correct("AB"))  # this has to be confirmed with Tomas/Marco
        self.assertFalse(obj.is_string_correct("GLEEA"))
        self.assertFalse(obj.is_string_correct("GLEAACA"))
        self.assertFalse(obj.is_string_correct("AOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_3_5(self):
        """'and' mode with 'anything': both "AB" and "CF" must occur somewhere."""
        obj = build_automaton("AB CF anything", "and")
        self.assertTrue(obj.is_string_correct("FJGKJKJKJKJDCFCFDJKJKJKSJAB"))
        self.assertTrue(obj.is_string_correct("ABCF"))
        self.assertTrue(obj.is_string_correct("GLEABCGLEACF"))
        self.assertTrue(obj.is_string_correct("ABABABCFCF"))
        self.assertTrue(obj.is_string_correct("ABCF"))
        self.assertFalse(obj.is_string_correct("GLEEAB"))
        self.assertFalse(obj.is_string_correct("GLEAACFA"))
        self.assertFalse(obj.is_string_correct("AOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_4_1(self):
        """'not' qualifier: any string is accepted provided "AB" never occurs."""
        obj = build_automaton("not AB anything", "and")
        self.assertTrue(obj.is_string_correct("ADFCFHGHADDDB"))
        self.assertTrue(obj.is_string_correct("FJGKJKJKJKJDCFCFDJKJKJKSJA"))
        self.assertTrue(obj.is_string_correct("ACF"))
        self.assertTrue(obj.is_string_correct("GLEACGLEACF"))
        self.assertTrue(obj.is_string_correct("AACFCF"))
        self.assertTrue(obj.is_string_correct("ACF"))
        self.assertFalse(obj.is_string_correct("GLEEAB"))
        self.assertFalse(obj.is_string_correct("GLEABACFA"))
        self.assertFalse(obj.is_string_correct("ABOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_4_2(self):
        """'not AB' combined with a required "CF"-free constraint? No:
        "AB" is forbidden while "CF" remains permitted content."""
        obj = build_automaton("not AB CF anything", "and")
        self.assertTrue(obj.is_string_correct("DJFKJKJSCFDSFG"))
        self.assertTrue(obj.is_string_correct("FJGKJKJKJKJDCFCFDJKJKJKSJA"))
        self.assertTrue(obj.is_string_correct("ACF"))
        self.assertTrue(obj.is_string_correct("GLEACGLEACF"))
        self.assertTrue(obj.is_string_correct("AACFCF"))
        self.assertTrue(obj.is_string_correct("ACF"))
        self.assertFalse(obj.is_string_correct("GLEEABCF"))
        self.assertFalse(obj.is_string_correct("GLEABACFA"))
        self.assertFalse(obj.is_string_correct("ABOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_4_3(self):
        """Two 'not' qualifiers: strings containing "AB" or "CF" are rejected."""
        obj = build_automaton("not AB not CF anything", "and")
        self.assertTrue(obj.is_string_correct("DJFKJKJSCEFDSFG"))
        self.assertTrue(obj.is_string_correct("FJGKJKJKJKJDCAFCAFDJKJKJKSJA"))
        self.assertTrue(obj.is_string_correct("ACDF"))
        self.assertTrue(obj.is_string_correct("GLEACGLEAC"))
        self.assertTrue(obj.is_string_correct("AACAFB"))
        self.assertTrue(obj.is_string_correct("AC"))
        self.assertFalse(obj.is_string_correct("GLEEACF"))
        self.assertFalse(obj.is_string_correct("GLEABACA"))
        self.assertFalse(obj.is_string_correct("GLEABACFA"))
        self.assertFalse(obj.is_string_correct("ABOPYQEG"))
        self.assertGeneratedStrings(obj)

    def test_fsa_generator_1(self):
        """Wrong-string generator variant 1 must produce rejected strings."""
        obj = build_automaton("C", "and")
        for ind in range(100):
            self.assertFalse(obj.is_string_correct(obj.get_wrong_string(random.randint(1, 20), 1)))

    def test_fsa_generator_2(self):
        """Correct-string generator for an 'or' automaton yields accepted strings."""
        obj = build_automaton("AB CF ABC", "or")
        for ind in range(100):
            string = obj.get_correct_string(random.randint(1, 20))
            self.assertTrue(obj.is_string_correct(string))

    def test_fsa_generator_3(self):
        """Correct-string generator for an 'and' automaton yields accepted strings."""
        obj = build_automaton("AB CF ABC", "and")
        for ind in range(100):
            string = obj.get_correct_string(random.randint(1, 20))
            self.assertTrue(obj.is_string_correct(string))

    def test_fsa_generator_4(self):
        """Correct-string generator with mixed required/'not'/'anything' terms."""
        obj = build_automaton("MNO KL not CF not ABC anything", "and")
        for ind in range(100):
            string = obj.get_correct_string(random.randint(1, 20))
            self.assertTrue(obj.is_string_correct(string))

    """
    # obj = build_automaton("AB anything", "or") # this is not handled
    # obj = build_automaton("AB CF", "and")
    # for ind in range(1000):
    # string = obj.get_wrong_string(0)
    # assert(not obj.is_string_correct(string))
    """
|
|
# Copyright 2007 The Spitfire Authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import __builtin__
import copy
import logging
import os.path
import re
from spitfire.compiler import ast
from spitfire.compiler import analyzer
from spitfire.compiler import walker
# Snapshot of the Python builtin namespace, used to recognize references
# to builtins during analysis.
builtin_names = vars(__builtin__)

# Sentinels for the binop-counting analysis pass.
_BINOP_INVALID_COUNT = 1000  # Any value > 0 will work.
_BINOP_INITIAL_COUNT = 0
_BINOP_FIRST_PASS = 1
# Utility functions for searching up the tree.
def _get_parent_node_by_pred(node, pred, search_current=False):
"""Find the first parent node that satisfies a predicate function."""
if not search_current:
node = node.parent
while node is not None:
if pred(node):
return node
node = node.parent
return None
def _get_parent_node_by_type(node, node_type):
    """Find the first parent node that is an instance of *node_type*.

    *node_type* may be a single type or a tuple of types.
    """
    def matches(candidate):
        return isinstance(candidate, node_type)
    return _get_parent_node_by_pred(node, matches)
def _get_parent_loop(node):
    # Nearest enclosing #for loop, or None.
    return _get_parent_node_by_type(node, ast.ForNode)
def _get_parent_block(node):
    # Nearest enclosing block-scoped node (function, loop, or branch).
    return _get_parent_node_by_type(
        node, (ast.FunctionNode, ast.ForNode, ast.IfNode, ast.ElseNode))
def _get_local_identifiers(node):
    """Collect the identifiers visible from the scopes enclosing *node*.

    Walks up the parent chain, accumulating each scope's local, partial
    and dirty identifiers, and stops at the enclosing FunctionNode.

    Returns a 3-tuple of frozensets:
    (local_identifiers, partial_local_identifiers, dirty_local_identifiers).
    """
    local_identifiers = []
    partial_local_identifiers = []
    dirty_local_identifiers = []

    # search the parent scopes
    # fixme: should this be recursive?
    node = node.parent
    while node is not None:
        if isinstance(node, ast.ForNode):
            # Loop variables count as locals inside the loop body.
            local_identifiers.extend(node.loop_variant_set)
            local_identifiers.extend(node.scope.local_identifiers)
            partial_local_identifiers.extend(
                node.scope.partial_local_identifiers)
            dirty_local_identifiers.extend(node.scope.dirty_local_identifiers)
        elif isinstance(node, ast.IfNode):
            local_identifiers.extend(node.scope.local_identifiers)
            partial_local_identifiers.extend(
                node.scope.partial_local_identifiers)
            dirty_local_identifiers.extend(node.scope.dirty_local_identifiers)
        elif isinstance(node, ast.ElseNode):
            # in this case, we don't want to go to the parent node, which is the
            # ast.IfNode - we want to go to the parent scope
            local_identifiers.extend(node.scope.local_identifiers)
            partial_local_identifiers.extend(
                node.scope.partial_local_identifiers)
            dirty_local_identifiers.extend(node.scope.dirty_local_identifiers)
            node = node.parent.parent
            continue
        elif isinstance(node, ast.FunctionNode):
            # Function scope is the outermost scope we care about.
            local_identifiers.extend(node.scope.local_identifiers)
            partial_local_identifiers.extend(
                node.scope.partial_local_identifiers)
            dirty_local_identifiers.extend(node.scope.dirty_local_identifiers)
            break
        node = node.parent
    return (frozenset(local_identifiers), frozenset(partial_local_identifiers),
            frozenset(dirty_local_identifiers))
def _get_identifiers_from_expression(node):
    """Find the IdentifierNodes present in an expression.

    Searches the flattened expression tree and returns the set of
    IdentifierNodes found. This doesn't traverse ast.GetAttrNode or any
    LiteralNodes such as ListLiteral or DictLiteral nodes.
    """
    return set(n for n in walker.flatten_tree(node)
               if isinstance(n, ast.IdentifierNode))
def _is_clean(node, scope):
    """Determine if the node references any dirty identifiers in the scope.

    Returns False when any identifier in *node* is considered "dirty",
    because it is not safe to hoist a node that depends on modifications
    made within its parent scope.
    """
    referenced = _get_identifiers_from_expression(node)
    # Clean means: no overlap with the scope's dirty identifiers.
    return referenced.isdisjoint(scope.dirty_local_identifiers)
def _get_common_aliased_expression_map(*scopes):
    """Get the clean aliased expressions shared by every given scope.

    Returns a dict mapping each alias key that is clean in ALL scopes to
    its alias from the first scope; empty dict when no scopes are given.
    """
    if not scopes:
        return {}
    clean_key_sets = []
    for scope in scopes:
        clean_keys = set(alias
                         for alias in scope.aliased_expression_map.iterkeys()
                         if _is_clean(alias, scope))
        clean_key_sets.append(clean_keys)
    common_clean_keys = set.intersection(*clean_key_sets)
    first_map = scopes[0].aliased_expression_map
    return dict((key, first_map[key]) for key in common_clean_keys)
# Utility functions for generating names.
def _generate_filtered_placeholder(node):
    """Given a node, generate a name for the cached filtered placeholder."""
    # Name is derived from the node's hash so equal nodes share a slot.
    return '_fph%08X' % ast.unsigned_hash(node)
def _generate_cached_resolved_placeholder(node):
    """Given a node, generate a name for the cached udn placeholder."""
    # Name is derived from the node's hash so equal nodes share a slot.
    return '_rudn%08X' % ast.unsigned_hash(node)
class _BaseAnalyzer(object):
def __init__(self, ast_root, options, compiler):
    """Base class for optimization passes over a template AST."""
    self.ast_root = ast_root  # root of the tree being optimized
    self.options = options  # optimizer option flags (hoisting, caching, ...)
    self.compiler = compiler  # owning compiler; used for errors/debug flags
    # Node types encountered that had no analyze* handler.
    self.unoptimized_node_types = set()
    # Used as a flag to determine how many binops we have analyzed.
    self.binop_count = _BINOP_INVALID_COUNT
def optimize_ast(self):
    """Run the optimizer over the whole tree and return the (mutated) root."""
    self.visit_ast(self.ast_root)
    if self.options.debug:
        print "unoptimized_node_types", self.unoptimized_node_types
    return self.ast_root
# build an AST node list from a single parse node
# need the parent in case we are going to delete a node
def visit_ast(self, node, parent=None):
    """Dispatch *node* to its analyze<ClassName> handler.

    Falls back to default_optimize_node when no handler exists; *parent*
    is recorded on the node so it can later be deleted or replaced within
    its parent.
    """
    node.parent = parent
    method_name = 'analyze%s' % node.__class__.__name__
    method = getattr(self, method_name, self.default_optimize_node)
    if method_name in self.compiler.debug_flags:
        print method_name, node
    return method(node)
def skip_analyze_node(self, node):
    """No-op visitor for node types that need no optimization."""
    return

# Leaf node types carry no optimizable sub-structure.
analyzeLiteralNode = skip_analyze_node
analyzeIdentifierNode = skip_analyze_node
analyzeTargetNode = skip_analyze_node
def default_optimize_node(self, node):
    """Fallback visitor: record the node's type so it can be reported
    as unoptimized (see optimize_ast's debug output)."""
    self.unoptimized_node_types.add(type(node))
# this function has some rules that are a bit unclean - you aren't actually
# looking for the 'parent' scope, but one you might insert nodes into. for
# instance, you skip over an ast.ForNode so that optimizations are inserted
# in a loop-invariant fashion.
def get_parent_scope(self, node):
    """Return the scope new nodes for *node* should be inserted into.

    Not strictly the nearest enclosing scope: a node that is part of an
    If test clause or a For expression list resolves to the next scope
    up, so inserted nodes land in a condition/loop-invariant position.

    Reports a compiler error when no enclosing function is found.
    """
    node_stack = [node]
    node = node.parent
    while node is not None:
        if type(node) == ast.FunctionNode:
            return node.scope
        elif type(node) == ast.IfNode:
            # elements of the test clause need to reference the next scope
            # "up" - usually the function, but could be another conditional
            # block fixme: if we ever implement "elif" this will have to get
            # fixed up
            if node_stack[-1] != node.test_expression:
                return node.scope
        elif type(node) == ast.ElseNode:
            return node.scope
        elif type(node) == ast.ForNode:
            if node_stack[-1] != node.expression_list:
                return node.scope
        node_stack.append(node)
        node = node.parent
    self.compiler.error(
        analyzer.SemanticAnalyzerError("expected a parent function"),
        pos=node_stack[-1].pos)
def get_insert_block_and_point(self, node):
    """Find where new statements related to *node* should be inserted.

    Walks up the parent chain until reaching a block node (Function, For,
    If or Else) that directly contains the chain's current child, and
    returns (block, child) so callers can insert relative to that child.

    Reports a compiler error when no enclosing block exists.
    """
    original_node = node
    insert_marker = node
    node = node.parent
    while node is not None:
        if isinstance(node, (ast.FunctionNode, ast.ForNode, ast.IfNode,
                             ast.ElseNode)):
            if insert_marker in node.child_nodes:
                return node, insert_marker

        # Keep the marker one step behind so it is always the direct
        # child of the node under examination.
        insert_marker = node
        node = node.parent
    # Bug fix: the loop only exits with node is None, so the previous
    # ``pos=node.pos`` raised AttributeError instead of reporting the
    # error; use the node we started from for the error position.
    self.compiler.error(
        analyzer.SemanticAnalyzerError("expected a parent block"),
        pos=original_node.pos)
def replace_in_parent_block(self, node, new_node):
    """Replace the statement containing *node* - its topmost ancestor
    directly inside the nearest enclosing block - with *new_node*."""
    insert_block, insert_marker = self.get_insert_block_and_point(node)
    insert_block.replace(insert_marker, new_node)
def reanalyzeConditionalNode(self, conditional_node):
    """Hoist condition-invariant aliases out of a conditional scope.

    Aliases that also appear in the parent scope and do not depend on
    identifiers local to the conditional are hoisted in front of it so
    both branches can share the computed value.
    """
    if (not self.options.hoist_conditional_aliases and
            not self.options.cache_filtered_placeholders):
        return
    parent_node = conditional_node  # NOTE(review): appears unused
    parent_block, insertion_point = self.get_insert_block_and_point(
        conditional_node)
    if self.options.hoist_conditional_aliases:
        #print "reanalyzeConditionalNode", conditional_node
        #print " parent_block", parent_block
        #print " parent_scope", parent_block.scope
        # NOTE: need to iterate over items, in case we modify something
        items = conditional_node.scope.aliased_expression_map.items()
        for alias_node, alias in items:
            #print " check alias:", alias
            #print " alias_node:", alias_node
            assign_alias_node = ast.AssignNode(alias,
                                               alias_node,
                                               pos=alias_node.pos)
            if alias_node in parent_block.scope.aliased_expression_map:
                if self._is_condition_invariant(alias_node,
                                                conditional_node):
                    #print " hoist:", assign_alias_node
                    self.hoist(conditional_node, parent_block,
                               insertion_point, alias_node,
                               assign_alias_node)
def reanalyzeLoopNode(self, loop_node):
if not self.options.hoist_loop_invariant_aliases:
return
parent_block, insertion_point = self.get_insert_block_and_point(
loop_node)
# NOTE: need to iterate over items, in case we modify something
for alias_node, alias in loop_node.scope.aliased_expression_map.items():
assign_alias = ast.AssignNode(alias, alias_node, pos=alias_node.pos)
if alias_node in parent_block.scope.aliased_expression_map:
if self._is_loop_invariant(alias_node, loop_node):
self.hoist(loop_node, parent_block, insertion_point,
alias_node, assign_alias)
else:
# if this alias is not already used in the parent scope, that's
# ok, hoist it if it's loop invariant
if self._is_loop_invariant(alias_node, loop_node):
loop_node.remove(assign_alias)
parent_block.insert_before(loop_node, assign_alias)
parent_block.scope.hoisted_aliases.append(alias_node)
def _is_condition_invariant(self, node, conditional_node):
"""The _is_condition_invariant_legacy function is broken, but seems to
be correct in some cases. Out of fear and redundancy, in order for
something to be hoisted, it must pass the old and new tests.
"""
return (self._is_condition_invariant_legacy(node, conditional_node) and
_is_clean(node, conditional_node.scope))
def _is_condition_invariant_legacy(self, node, conditional_node):
node_dependency_set = self.get_node_dependencies(node)
condition_invariant = (
not node_dependency_set & conditional_node.scope.local_identifiers)
#print "is_condition_invariant:", condition_invariant
#print " locals:", conditional_node.scope.local_identifiers
#print " deps:", node_dependency_set
return condition_invariant
def _is_loop_invariant(self, node, loop_node):
"""The _is_loop_invariant_legacy function is broken, but seems to be
correct in some cases. Out of fear and redundancy, in order for
something to be hoisted, it must pass the old and new tests.
"""
return (self._is_loop_invariant_legacy(node, loop_node) and
_is_clean(node, loop_node.scope))
    def _is_loop_invariant_legacy(self, node, loop_node):
        """Legacy loop-invariance test.

        A node is treated as invariant only when it neither depends on any
        loop variant of loop_node nor on any other node that lives inside
        the loop body (outside the node's own subtree).
        """
        node_dependency_set = self.get_node_dependencies(node)
        # print "is loop invariant node:", node
        # for x in node_dependency_set:
        #   print "  dep:", x
        # find dependencies within the loop node but outside the node we're
        # checking
        node_dependency_set_except_node_tree = node_dependency_set - set(
            walker.flatten_tree(node))
        dependencies_within_loop = set(walker.flatten_tree(
            loop_node)).intersection(node_dependency_set_except_node_tree)
        depends_on_loop_variants = bool(loop_node.loop_variant_set.intersection(
            node_dependency_set))
        # TODO: Disabling warnings for now. They are useless without
        # filenames. Also need to make sure all these cases are valid.
        # if not depends_on_loop_variants and dependencies_within_loop:
        #     # we can't assume this is invariant because it depends on other
        #     # nodes inside the loop. eventually we should hoist out both the
        #     # node and its dependencies.
        #     dependency_nodes = '\n'.join('  %s' % node.parent
        #                                  for node in dependencies_within_loop)
        #     logging.warning("Cannot hoist possible loop invariant: %s.", node)
        #     logging.warning("Please move following dependencies "
        #                     "out of the loop:\n%s", dependency_nodes)
        return not depends_on_loop_variants and not dependencies_within_loop
    def get_node_dependencies(self, node):
        """Compute the set of nodes the given node's value depends on.

        Starts from the flattened subtree of node. For every identifier
        found, crawls up the chain of parent blocks looking for the
        assignment that defines it and folds in the dependencies of the
        assigned expression. The test expression of any intervening
        if-block is added as a dependency as well (see FIXME below).
        """
        node_dependency_set = set(walker.flatten_tree(node))
        parent_block = _get_parent_block(node)
        for n in list(node_dependency_set):
            # when this is an identifier, you need to check all of the potential
            # the dependencies for that symbol, which means doing some crawling
            if isinstance(n, ast.IdentifierNode):
                identifier = n
                parent_block_to_check = parent_block
                while parent_block_to_check:
                    for block_node in parent_block_to_check.child_nodes:
                        if isinstance(block_node, ast.AssignNode):
                            if block_node.left == identifier:
                                node_dependency_set.update(
                                    self.get_node_dependencies(
                                        block_node.right))
                                parent_block_to_check = None
                                break
                        elif isinstance(block_node, ast.IfNode):
                            # if you encounter a conditional in your chain, you
                            # depend on any dependencies of the condition itself
                            # FIXME: calling
                            # get_node_dependencies(block_node.test_expression)
                            # causes an infinite loop, but that is probably the
                            # correct way forward to address the dependency
                            # chain
                            node_dependency_set.update(walker.flatten_tree(
                                block_node.test_expression))
                    else:
                        # NOTE: for/else -- this runs only when no defining
                        # assignment was found (no break), in which case we
                        # continue crawling up the chain of parent blocks.
                        parent_block_to_check = _get_parent_block(
                            parent_block_to_check)
            #elif isinstance(n, (ast.GetUDNNode, ast.FilterNode)):
            #    node_dependency_set.update(
            #        self.get_node_dependencies(node.expression))
            #print "get_node_dependencies", node
            #print "  deps:", node_dependency_set
        return node_dependency_set
class OptimizationAnalyzer(_BaseAnalyzer):
def analyzeParameterNode(self, parameter):
self.visit_ast(parameter.default, parameter)
return
def analyzeTemplateNode(self, template):
# If we have a function registry, add in the nodes that we determined
# are used from the analyzer stage before we begin optimizing.
for alias in sorted(self.compiler.function_name_registry):
if alias not in template.used_function_registry_identifiers:
continue
fq_name, method = self.compiler.function_name_registry[alias]
fq_name_parts = fq_name.split('.')
self.ast_root.from_nodes.append(ast.FromNode(
[ast.IdentifierNode(x) for x in fq_name_parts[:-1]
], ast.IdentifierNode(fq_name_parts[-1]), ast.IdentifierNode(
alias)))
for n in template.from_nodes:
if n.alias:
template.global_identifiers.add(n.alias)
else:
template.global_identifiers.add(n.identifier)
# scan extends for dependencies
# this allows faster calling of template functions - we could also
# tune ast.BufferWrite calls for these nodes
if self.options.use_dependency_analysis:
for n in template.extends_nodes:
path = os.path.join(
*[ident_node.name
for ident_node in n.source_module_name_list])
template_function_names = get_template_functions(
self.compiler.include_path, path)
template.template_methods.update(template_function_names)
self.visit_ast(template.main_function, template)
for n in template.child_nodes:
self.visit_ast(n, template)
def analyzeFunctionNode(self, function):
function.scope.local_identifiers.update([ast.IdentifierNode(
n.name) for n in function.parameter_list])
# Some binops optimzations can only be done in functions.
self.binop_count = _BINOP_INITIAL_COUNT
for n in function.child_nodes:
self.visit_ast(n, function)
self.binop_count = _BINOP_INVALID_COUNT
def analyzeForNode(self, for_node):
self.visit_ast(for_node.target_list, for_node)
for_node.loop_variant_set = set(for_node.target_list.flat_list)
self.visit_ast(for_node.expression_list, for_node)
for n in for_node.child_nodes:
self.visit_ast(n, for_node)
def analyzeAssignNode(self, node):
scope = self.get_parent_scope(node)
local_identifiers, _, dirty_identifiers = _get_local_identifiers(node)
# This is an assignment at an index.
if isinstance(node.left, ast.SliceNode):
_identifier = ast.IdentifierNode(node.left.expression.name,
pos=node.pos)
scope.dirty_local_identifiers.add(_identifier)
if _identifier not in local_identifiers:
self.compiler.error(
analyzer.SemanticAnalyzerError(
'Expression %s being indexed must be defined before use'
% _identifier.name),
pos=node.pos)
else:
_identifier = ast.IdentifierNode(node.left.name, pos=node.pos)
alias_name = _generate_filtered_placeholder(_identifier)
if alias_name in scope.alias_name_set:
if self.options.double_assign_error:
self.compiler.error(
analyzer.SemanticAnalyzerError(
'Multiple assignment of %s' % _identifier.name),
pos=node.pos)
else:
self.compiler.warn('Multiple assignment of %s' %
_identifier.name,
pos=node.pos)
scope.local_identifiers.add(_identifier)
# note: this hack is here so you can partially analyze alias nodes
# without double-processing
if node.right:
self.visit_ast(node.right, node)
def analyzeExpressionListNode(self, expression_list_node):
for n in expression_list_node:
self.visit_ast(n, expression_list_node)
def analyzeTargetListNode(self, target_list_node):
flat_list = []
for n in target_list_node:
self.visit_ast(n, target_list_node)
if type(n) == ast.TargetListNode:
flat_list.extend(n.flat_list)
else:
flat_list.append(n)
target_list_node.flat_list = flat_list
# def analyzeParameterListNode(self, parameter_list_node):
# flat_list = []
# for n in parameter_list_node:
# flat_list.append(n)
# target_list_node.flat_list = flat_list
def analyzeArgListNode(self, arg_list_node):
scope = self.get_parent_scope(arg_list_node)
for n in arg_list_node:
# If an identifier is passed into a function and we are not in a
# filter node, mark it as dirty. Filter nodes are always written
# out, therefore we don't consider the final call a modification.
# This assumption is predictaed on the fact that you can't modify a
# variable once it has been written.
if not _get_parent_node_by_type(arg_list_node, ast.FilterNode):
if isinstance(n, ast.PlaceholderNode):
scope.dirty_local_identifiers.add(ast.IdentifierNode(
n.name))
if isinstance(n, ast.ParameterNode) and isinstance(
n.default, ast.PlaceholderNode):
scope.dirty_local_identifiers.add(ast.IdentifierNode(
n.default.name))
self.visit_ast(n, arg_list_node)
def analyzeTupleLiteralNode(self, tuple_literal_node):
for n in tuple_literal_node.child_nodes:
self.visit_ast(n, tuple_literal_node)
def analyzeDictLiteralNode(self, dict_literal_node):
for key_node, value_node in dict_literal_node.child_nodes:
self.visit_ast(key_node, dict_literal_node)
self.visit_ast(value_node, dict_literal_node)
def analyzeListLiteralNode(self, list_literal_node):
for n in list_literal_node.child_nodes:
self.visit_ast(n, list_literal_node)
def analyzeDoNode(self, do_node):
self.visit_ast(do_node.expression, do_node)
def analyzeCallFunctionNode(self, function_call):
# If the ast.CallFunctionNode is in the test expression of an
# ast.IfNode, do not wrap the function with a SanitizedPlaceholder.
def is_in_test_expression(node):
return (isinstance(node.parent, ast.IfNode) and
node == node.parent.test_expression)
if _get_parent_node_by_pred(function_call,
is_in_test_expression,
search_current=True):
function_call.sanitization_state = ast.SanitizedState.NOT_OUTPUTTED
# If the ast.CallFunctionNode is in a ast.DoNode, do not wrap the
# function with a SanitizedPlaceholder.
if _get_parent_node_by_type(function_call, ast.DoNode):
function_call.sanitization_state = ast.SanitizedState.NOT_OUTPUTTED
self.visit_ast(function_call.expression, function_call)
self.visit_ast(function_call.arg_list, function_call)
# NOTE: these optimizations are disabled because the optimizer has a
# tendency to "over-hoist" code inside a ast.CacheNode and you end up doing
# *more* work
def analyzeCacheNode(self, cache_node):
cache_placeholders = self.options.cache_resolved_placeholders
cache_udn_expressions = self.options.cache_resolved_udn_expressions
cache_filtered_placeholders = self.options.cache_filtered_placeholders
self.options.cache_resolved_placeholders = False
self.options.cache_resolved_udn_expressions = False
self.options.cache_filtered_placeholders = False
self.visit_ast(cache_node.expression, cache_node)
self.options.cache_resolved_placeholders = cache_placeholders
self.options.cache_resolved_udn_expressions = cache_udn_expressions
self.options.cache_filtered_placeholders = cache_filtered_placeholders
def analyzeBufferWrite(self, buffer_write):
self.visit_ast(buffer_write.expression, buffer_write)
# template functions output text - don't format them as strings
if (isinstance(buffer_write.expression, ast.BinOpNode) and
buffer_write.expression.operator == '%' and
isinstance(buffer_write.expression.right, ast.CallFunctionNode)
and isinstance(buffer_write.expression.right.expression,
ast.TemplateMethodIdentifierNode)):
buffer_write.replace(buffer_write.expression,
buffer_write.expression.right)
def analyzeEchoNode(self, node):
for n in (node.test_expression, node.true_expression,
node.false_expression):
if n:
self.visit_ast(n, node)
def analyzeFilterNode(self, filter_node):
self.visit_ast(filter_node.expression, filter_node)
if isinstance(filter_node.expression, ast.CallFunctionNode):
# If a ast.FilterNode has a ast.CallFunctionNode as an expression,
# let the filter node handle determining if filtering should occur,
# rather than the sanitization wrapper.
filter_node.expression.sanitization_state = (
ast.SanitizedState.OUTPUTTED_IMMEDIATELY)
if (isinstance(filter_node.expression, ast.CallFunctionNode) and
isinstance(filter_node.expression.expression,
ast.TemplateMethodIdentifierNode)):
filter_node.parent.replace(filter_node, filter_node.expression)
return
# A ast.CallFunctionNode will require passing in both the value and the
# function to the filter_function. If the ast.CallFunctionNode's
# expression is a ast.GetUDNNode, we can avoid looking up the attribute
# twice by caching its value. This is also true if the
# ast.CallFunctionNode's expression is an ast.IdentifierNode with a '.'
# in the name.
if isinstance(filter_node.expression, ast.CallFunctionNode):
fn_node = filter_node.expression
if (self.options.cache_resolved_udn_expressions and
isinstance(fn_node.expression, ast.IdentifierNode) and
'.' in fn_node.expression.name):
scope = self.get_parent_scope(filter_node)
udn_node = fn_node.expression
# For some udn style node, create a cached variable. ex.
# _rudn12345
alias_name = _generate_cached_resolved_placeholder(udn_node)
alias = ast.IdentifierNode(alias_name, pos=filter_node.pos)
scope.local_identifiers.add(alias)
# Create an assignment node that assigns the udn node to the
# ast.IdentifierNode.
assign_alias = ast.AssignNode(alias,
udn_node,
pos=filter_node.pos)
insert_block, insert_marker = self.get_insert_block_and_point(
filter_node)
# Insert the assignment before the ast.FilterNode
insert_block.insert_before(insert_marker, assign_alias)
# Replace the udn node in the ast.CallFunctionNode with the
# alias.
filter_node.expression.replace(udn_node, alias)
if self.options.cache_filtered_placeholders:
# NOTE: you *must* analyze the node before putting it in a dict
# otherwise the definition of hash and equivalence will change and
# the node will not be found due to the sketchy custom hash function
scope = self.get_parent_scope(filter_node)
alias = scope.aliased_expression_map.get(filter_node)
if not alias:
alias_name = _generate_filtered_placeholder(
filter_node.expression)
if alias_name in scope.alias_name_set:
print "duplicate alias_name", alias_name
print "scope", scope
print "scope.alias_name_set", scope.alias_name_set
print " ".join("scope.aliased_expression_map",
scope.aliased_expression_map)
return
alias = ast.IdentifierNode(alias_name, pos=filter_node.pos)
scope.alias_name_set.add(alias_name)
scope.aliased_expression_map[filter_node] = alias
assign_alias = ast.AssignNode(alias,
filter_node,
pos=filter_node.pos)
insert_block, insert_marker = self.get_insert_block_and_point(
filter_node)
insert_block.insert_before(insert_marker, assign_alias)
filter_node.parent.replace(filter_node, alias)
def _placeholdernode_replacement(self, placeholder, local_var,
cached_placeholder, local_identifiers):
"""This function tries to replace a ast.PlaceholderNode with a node type
that does not need to be resolved such as an ast.IdentifierNode or a
cached placeholder.
"""
if local_var in local_identifiers:
placeholder.parent.replace(placeholder, local_var)
elif placeholder.name in self.ast_root.template_methods:
placeholder.parent.replace(
placeholder, ast.TemplateMethodIdentifierNode(placeholder.name))
elif local_var in self.ast_root.global_identifiers:
placeholder.parent.replace(placeholder, local_var)
elif cached_placeholder in local_identifiers:
placeholder.parent.replace(placeholder, cached_placeholder)
elif local_var.name in builtin_names:
placeholder.parent.replace(placeholder,
ast.IdentifierNode(local_var.name))
elif self.options.cache_resolved_placeholders:
scope = self.get_parent_scope(placeholder)
scope.alias_name_set.add(cached_placeholder.name)
scope.aliased_expression_map[placeholder] = cached_placeholder
insert_block, insert_marker = self.get_insert_block_and_point(
placeholder)
# note: this is sketchy enough that it requires some explanation
# basically, you need to visit the node for the parent function to
# get the memo that this value is aliased. unfortunately, the naive
# case of just calling visit_ast blows up since it tries to double
# analyze a certain set of nodes. you only really need to analyze
# that the assignment took place, then you can safely alias the
# actual function call. definitely sketchy, but it does seem to work
assign_rph = ast.AssignNode(cached_placeholder,
None,
pos=placeholder.pos)
cached_placeholder.parent = assign_rph
#print "optimize scope:", insert_block
#print "optimize marker:", insert_marker
insert_block.insert_before(insert_marker, assign_rph)
self.visit_ast(assign_rph, insert_block)
assign_rph.right = placeholder
placeholder.parent.replace(placeholder, cached_placeholder)
def analyzePlaceholderNode(self, placeholder):
if self.options.directly_access_defined_variables:
# when the analyzer finds a ast.PlaceholderNode and generates a
# function call out of it, i annotate an ast.IdentifierNode with the
# original placeholder name
local_var = ast.IdentifierNode(placeholder.name,
pos=placeholder.pos)
cached_placeholder = ast.IdentifierNode('_rph_%s' % local_var.name,
pos=placeholder.pos)
(local_identifiers, partial_local_identifiers, _) = (
_get_local_identifiers(placeholder))
attrs = set([ast.IdentifierNode(node.name)
for node in self.ast_root.attr_nodes])
non_local_identifiers = (
partial_local_identifiers - local_identifiers - attrs)
if (self.options.static_analysis and
local_var in non_local_identifiers):
self.compiler.error(
analyzer.SemanticAnalyzerError(
('Variable %s is not guaranteed to be in scope. '
'Define the variable in all branches of the '
'conditional or before the conditional.') % local_var),
pos=placeholder.pos)
# print "local_identifiers", local_identifiers
self._placeholdernode_replacement(
placeholder, local_var, cached_placeholder, local_identifiers)
def analyzePlaceholderSubstitutionNode(self, placeholder_substitution):
self.visit_ast(placeholder_substitution.expression,
placeholder_substitution)
# def alias_expression_in_function(self, function, expression):
# alias = function.aliased_expression_map.get(expression)
# if not alias:
# alias_name = '_%s' % (expression.name)
# if alias_name in function.alias_name_set:
# print "duplicate alias_name", alias_name
# return
# alias = ast.IdentifierNode(alias_name)
# function.aliased_expression_map[expression] = alias
# assign_alias = ast.AssignNode(alias, expression)
# parent_loop = _get_parent_loop(node)
# # fixme: check to see if this expression is loop-invariant
# # must add a test case for this
# child_node_set = set(node.getChildNodes())
# #print "child_node_set", child_node_set
# #print "parent_loop", parent_loop, "parent", node.parent
# if (parent_loop is not None and
# not parent_loop.loop_variant_set.intersection(child_node_set)):
# #print "pull up loop invariant", assign_alias
# parent_loop.parent.insert_before(parent_loop, assign_alias)
# else:
# insert_block, insert_marker = self.get_insert_block_and_point(node)
# insert_block.insert_before(insert_marker, assign_alias)
# node.parent.replace(node, alias)
def analyzeGetAttrNode(self, node):
if not self.options.alias_invariants:
return
# fixme: only handle the trivial case for now
# simplifies the protocol for making up alias names
if type(node.expression) != ast.IdentifierNode:
return
scope = self.get_parent_scope(node)
alias = scope.aliased_expression_map.get(node)
if not alias:
if node.expression.name[0] != '_':
alias_format = '_%s_%s'
else:
alias_format = '%s_%s'
alias_name = alias_format % (node.expression.name, node.name)
if alias_name in scope.alias_name_set:
print "duplicate alias_name", alias_name
print "scope", scope
print "scope.alias_name_set", scope.alias_name_set
print " ".join("scope.aliased_expression_map",
scope.aliased_expression_map)
return
alias = ast.IdentifierNode(alias_name)
scope.alias_name_set.add(alias_name)
scope.aliased_expression_map[node] = alias
assign_alias = ast.AssignNode(alias, node)
parent_loop = _get_parent_loop(node)
# fixme: check to see if this expression is loop-invariant
# must add a test case for this
child_node_set = set(node.getChildNodes())
#print "child_node_set", child_node_set
#print "parent_loop", parent_loop, "parent", node.parent
if (self.options.inline_hoist_loop_invariant_aliases and
parent_loop is not None and
not parent_loop.loop_variant_set.intersection(
child_node_set)):
# print "pull up loop invariant", assign_alias
parent_loop.parent.insert_before(parent_loop, assign_alias)
else:
insert_block, insert_marker = self.get_insert_block_and_point(
node)
insert_block.insert_before(insert_marker, assign_alias)
node.parent.replace(node, alias)
def analyzeIfNode(self, if_node):
self.visit_ast(if_node.test_expression, if_node)
for n in if_node.child_nodes:
self.visit_ast(n, if_node)
for n in if_node.else_.child_nodes:
self.visit_ast(n, if_node.else_)
parent_scope = self.get_parent_scope(if_node)
if_scope_vars = if_node.scope.local_identifiers
# once both branches are optimized, walk the scopes for any variables
# that are defined in both places. those will be promoted to function
# scope since it is safe to assume that those will defined fixme: this
# feels like a bit of hack - but not sure how to do this correctly
# without reverting to slower performance for almost all calls to
# resolve_placeholder.
#
# it seems like certain optimizations need to be hoisted up to the
# parent scope. this is particularly the case when you are aliasing
# common functions that are likely to occur in the parent scope after
# the conditional block. you *need* to hoist those, or you will have
# errors when the branch fails. essentially you have to detect and hoist
# 'branch invariant' optimizations.
#
# TODO: we can try to hoist up invariants if they don't depend on the
# condition. this is somewhat hard to know, so the best way to do so
# without multiple passes of the optimizer is to hoist only things that
# were already defined in the parent scope - like _buffer, or things on
# self.
if if_node.else_.child_nodes:
common_local_identifiers = (if_scope_vars
& if_node.else_.scope.local_identifiers)
# The set of nodes that are not defined in both the if and else
# branches.
partial_local_identifiers = (
(if_scope_vars ^ if_node.else_.scope.local_identifiers)
| if_node.scope.partial_local_identifiers
| if_node.else_.scope.partial_local_identifiers)
common_alias_name_set = (if_node.scope.alias_name_set
& if_node.else_.scope.alias_name_set)
# Only promote aliased expressions to the parent scope when the
# alias would be used in both the if and else branches.
common_aliased_expression_map = _get_common_aliased_expression_map(
if_node.scope, if_node.else_.scope)
parent_scope.local_identifiers.update(common_local_identifiers)
parent_scope.alias_name_set.update(common_alias_name_set)
parent_scope.aliased_expression_map.update(
common_aliased_expression_map)
else:
partial_local_identifiers = if_scope_vars
non_parent_scope_identifiers = (
partial_local_identifiers - parent_scope.local_identifiers)
parent_scope.partial_local_identifiers.update(
non_parent_scope_identifiers)
# Any variable considered dirty in an if or else block should be dirty
# in the parent scope as well.
if_dirty_identifiers = if_node.scope.dirty_local_identifiers
else_dirty_identifiers = if_node.else_.scope.dirty_local_identifiers
parent_scope.dirty_local_identifiers.update(if_dirty_identifiers)
parent_scope.dirty_local_identifiers.update(else_dirty_identifiers)
def analyzeBinOpNode(self, n):
# if you are trying to use short-circuit behavior, these two
# optimizations can sabotage correct execution since the rhs may be
# hoisted above the ast.IfNode and cause it to get executed prior to
# passing the lhs check.
should_visit_left = True
and_or_operator = n.operator in ('and', 'or')
if and_or_operator:
self.binop_count += 1
cache_placeholders = self.options.cache_resolved_placeholders
cache_udn_expressions = self.options.cache_resolved_udn_expressions
# If this is the first binop, we can visit the LHS since that must
# always be executed.
if self.binop_count == _BINOP_FIRST_PASS:
self.visit_ast(n.left, n)
should_visit_left = False
self.options.cache_resolved_placeholders = False
self.options.cache_resolved_udn_expressions = False
if should_visit_left:
self.visit_ast(n.left, n)
self.visit_ast(n.right, n)
if and_or_operator:
self.binop_count -= 1
self.options.cache_resolved_placeholders = cache_placeholders
self.options.cache_resolved_udn_expressions = cache_udn_expressions
analyzeBinOpExpressionNode = analyzeBinOpNode
def analyzeUnaryOpNode(self, op_node):
self.visit_ast(op_node.expression, op_node)
def analyzeGetUDNNode(self, node):
if not self.options.prefer_whole_udn_expressions:
self.visit_ast(node.expression, node)
# If self._filter_function is created in a macro, make sure we rename
# it.
self_node = ast.IdentifierNode('self')
if node.expression == self_node and node.name == '_filter_function':
alias = ast.IdentifierNode('_self_private_filter_function',
pos=node.pos)
node.parent.replace(node, alias)
return
# If self.filter_function is created in a macro, make sure we rename it.
if node.expression == self_node and node.name == 'filter_function':
alias = ast.IdentifierNode('_self_filter_function', pos=node.pos)
node.parent.replace(node, alias)
return
if self.options.cache_resolved_udn_expressions:
cached_udn = ast.IdentifierNode(
_generate_cached_resolved_placeholder(node),
pos=node.pos)
(local_identifiers, _, _) = _get_local_identifiers(node)
if cached_udn in local_identifiers:
node.parent.replace(node, cached_udn)
else:
insert_block, insert_marker = self.get_insert_block_and_point(
node)
# if there is a reassignment in the parent block, don't cache
# this incase it needs to be re-resolved.
# #set $text = $text.replace('\r\n', '\n')
# #set $text = $text.replace('\t', ' ')
# in this example, if you cache the udn expression text.replace,
# you have a problem - you won't ever use the new string create
# by the first call to replace
for child_node in insert_block.child_nodes:
if (isinstance(child_node, ast.AssignNode) and
child_node.left == node.expression):
return
scope = self.get_parent_scope(node)
scope.alias_name_set.add(cached_udn.name)
scope.aliased_expression_map[node] = cached_udn
# note: this is sketchy enough that it requires some explanation
# basically, you need to visit the node for the parent function
# to get the memo that this value is aliased. unfortunately, the
# naive case of just calling visit_ast blows up since it tries
# to double analyze a certain set of nodes. you only really need
# to analyze that the assignment took place, then you can safely
# alias the actual function call. definitely sketchy, but it
# does seem to work
assign_rph = ast.AssignNode(cached_udn, None, pos=node.pos)
cached_udn.parent = assign_rph
insert_block.insert_before(insert_marker, assign_rph)
self.visit_ast(assign_rph, insert_block)
assign_rph.right = node
node.parent.replace(node, cached_udn)
elif self.options.prefer_whole_udn_expressions:
self.visit_ast(node.expression, node)
def analyzeSliceNode(self, pnode):
self.visit_ast(pnode.expression, pnode)
self.visit_ast(pnode.slice_expression, pnode)
# a second pass over the optimized tree to hoist invariant aliases to their
# parent blocks
class FinalPassAnalyzer(_BaseAnalyzer):
def analyzeTemplateNode(self, template):
self.visit_ast(template.main_function, template)
for n in template.child_nodes:
self.visit_ast(n, template)
def analyzeFunctionNode(self, function):
for n in function.child_nodes:
self.visit_ast(n, function)
if self.options.batch_buffer_writes:
self.collect_writes(function)
def analyzeForNode(self, for_node):
for n in for_node.child_nodes:
self.visit_ast(n, for_node)
self.reanalyzeLoopNode(for_node)
if self.options.batch_buffer_writes:
self.collect_writes(for_node)
def analyzeIfNode(self, if_node):
# depth-first
for n in if_node.child_nodes:
self.visit_ast(n, if_node)
for n in if_node.else_.child_nodes:
self.visit_ast(n, if_node.else_)
self.reanalyzeConditionalNode(if_node)
self.reanalyzeConditionalNode(if_node.else_)
if self.options.batch_buffer_writes:
self.collect_writes(if_node)
self.collect_writes(if_node.else_)
def analyzeBufferWrite(self, buffer_write):
"""Perform ast.BufferWrite optimizations.
Do this in the final pass optimizer to make sure that the
optimization is handled after caching placeholders.
"""
self.visit_ast(buffer_write.expression, buffer_write)
# All filterning is done before writing to the buffer. If the function
# output needed filtering then it would be wrapped in a ast.FilterNode.
if isinstance(buffer_write.expression, ast.CallFunctionNode):
buffer_write.expression.sanitization_state = (
ast.SanitizedState.OUTPUTTED_IMMEDIATELY)
def hoist(self, parent_node, parent_block, insertion_point, alias_node,
assign_alias_node):
# prune the implementation in the nested block
# print "prune", alias_node
# print " ".join("parent_block aliases",
# parent_block.scope.aliased_expression_map)
parent_node.remove(assign_alias_node)
# if we've already hoisted an assignment, don't do it again
if alias_node not in parent_block.scope.hoisted_aliases:
# prune the original implementation in the current block and
# reinsert the alias before it's first potential usage if it
# is needed earlier in the execution path.
# when a variable aliased in both the if and
# else blocks is promoted to the parent scope
# the implementation isn't actually hoisted (should it be?)
# inline with the ast.IfNode optimization so we need to check if the
# node is already here
if assign_alias_node in parent_block.child_nodes:
current_pos = parent_block.child_nodes.index(assign_alias_node)
# an else node's parent is the ast.IfNode, which is the relevant
# node when searching for the insertion point
needed_pos = parent_block.child_nodes.index(insertion_point)
if needed_pos < current_pos:
parent_block.child_nodes.remove(assign_alias_node)
if isinstance(parent_node, ast.ElseNode):
parent_block.insert_before(parent_node.parent,
assign_alias_node)
else:
parent_block.insert_before(parent_node,
assign_alias_node)
# print "insert_before", alias_node
else:
# still need to insert the alias
parent_block.insert_before(insertion_point, assign_alias_node)
parent_block.scope.hoisted_aliases.append(alias_node)
# NOTE: once we hoist an expression, we need to make sure that we no
# longer use this for dependencies in the current scope
del parent_node.scope.aliased_expression_map[alias_node]
parent_node.scope.alias_name_set.remove(assign_alias_node.left.name)
# FIXME: this is probably an indication of a bug or unnecessary
# difference between the caching of placeholders and filter expressions
if not isinstance(alias_node, ast.FilterNode):
parent_node.scope.local_identifiers.remove(assign_alias_node.left)
def make_write_node(self, write_list):
"""Convert a list of expressions to be written into a single
ast.ASTNode. This will return either be a ast.BufferWrite node, a
ast.BufferExtend node or None if write_list is empty.
"""
if not write_list:
return None
# If there is only a single node, avoid the overhead of
# constructing a tuple.
if len(write_list) == 1:
return ast.BufferWrite(write_list[0])
tuple_node = ast.TupleLiteralNode()
for exp in write_list:
tuple_node.append(exp)
return ast.BufferExtend(tuple_node)
def collect_writes(self, node):
    """Batch adjacent buffer writes among node's children.

    Writes are allowed to drift forward past "passable" nodes
    (assignments and cache fills) until a node that could observe or
    alter the buffer is reached, at which point the accumulated
    expressions are flushed as a single write. Batching into one
    buffer.extend call avoids repeated list resize operations.
    """
    passable_nodes = (ast.AssignNode, ast.CacheNode)
    # ast.ASTNode.insert_before is broken in a way that if there are
    # duplicate nodes, inserts always occur before the first one. To
    # deal with this, we construct a new ast.NodeList rather than
    # modifying the original one.
    original_children = node.child_nodes
    node.child_nodes = ast.NodeList()
    pending = []
    for child in original_children:
        if isinstance(child, ast.BufferWrite):
            pending.append(child.expression)
        elif isinstance(child, ast.BufferExtend):
            pending.extend(child.expression.child_nodes)
        elif isinstance(child, passable_nodes):
            # Safe to hoist writes past this node; keep accumulating.
            node.append(child)
        else:
            # This node may interact with the buffer: flush everything
            # collected so far ahead of it.
            flushed = self.make_write_node(pending)
            if flushed:
                node.append(flushed)
            pending = []
            node.append(child)
    # Flush any trailing writes at the end of the child list.
    flushed = self.make_write_node(pending)
    if flushed:
        node.append(flushed)
# Matches "#def name" / "#block name" directives that define template
# functions. Raw strings are used so \s and \w are regex escapes rather
# than invalid Python string escapes (a DeprecationWarning/SyntaxWarning
# on Python 3).
template_function_re = re.compile(r'^[^#]*#(def|block)\s+(\w+)')
# Matches "#extends dotted.template.path" inheritance directives.
extends_re = re.compile(r'^#extends\s+([\.\w]+)')

# File extensions a template path may resolve to on disk.
template_extensions = ('.spt', '.tmpl')
def _extend_to_real_path(base_dir, ex_path):
    """Resolve a template module path to an existing file on disk.

    Tries each known template extension in turn and returns the first
    path that exists; raises if no candidate file is found.
    """
    candidate_base = os.path.join(base_dir, ex_path)
    for extension in template_extensions:
        candidate = candidate_base + extension
        if os.path.exists(candidate):
            return candidate
    raise Exception(
        'could not find .spt or .tmpl file for %s during dependency check' %
        ex_path)
# scan an spt file for template functions it will output
def get_template_functions(base_dir, path):
    """Collect the names of template functions defined by a template.

    Scans the template at *path* (relative to *base_dir*) for #def/#block
    directives and recursively includes functions inherited via #extends.
    Returns a set of function-name strings.

    Raises (from _extend_to_real_path) if the template file cannot be
    found; the previous ``if not path`` guard was dead code because the
    helper never returns a falsy value.
    """
    template_function_names = set()
    path = _extend_to_real_path(base_dir, path)
    # Context manager ensures the handle is closed even if a read or the
    # recursive call raises (the original leaked the handle on errors).
    with open(path) as f:
        for line in f:
            match = template_function_re.match(line)
            if match:
                template_function_names.add(match.group(2))
                continue
            match = extends_re.match(line)
            if match:
                extend_name = match.group(1)
                # Dotted template path -> filesystem path.
                extend_path = extend_name.replace('.', '/')
                template_function_names.update(
                    get_template_functions(base_dir, extend_path))
    return template_function_names
|
|
from collections import namedtuple
from copy import copy
from .array import Array, ArrayError
from .location import Location
class BoardError(Exception):
    """Raised for illegal board operations (occupied point, suicide, ko, empty history)."""
    pass
class Board(Array):
    """
    Stores board locations. Provides methods to carry out game logic.

    Cells are addressed as self[x, y] (via the Array base class) and hold
    one of the Location singletons BLACK, WHITE or EMPTY.
    """
    # Location singletons used both as cell values and as turn markers.
    BLACK = Location('black')
    WHITE = Location('white')
    EMPTY = Location('empty')

    # Play order; indexed by a boolean in _next_turn.
    TURNS = (
        BLACK,
        WHITE,
    )

    # Snapshot of a game position: (array copy, whose turn it is, score copy).
    State = namedtuple('State', ['board', 'turn', 'score'])

    def __init__(self, width):
        """Create an empty square board of the given width."""
        super(Board, self).__init__(width, width, self.EMPTY)

        # Turn counter
        self._turn = self.BLACK

        # Player scores
        self._score = {
            self.BLACK: 0,
            self.WHITE: 0,
        }

        # Game history
        self._history = []
        self._redo = []

    @property
    def turn(self):
        """
        Gets the current turn as a display string (repr of the Location).
        """
        return repr(self._turn)

    @property
    def score(self):
        """
        Gets the current score as a plain dict keyed by color name.
        """
        return {
            'black': self._score[self.BLACK],
            'white': self._score[self.WHITE],
        }

    @property
    def _next_turn(self):
        """
        Gets color of next turn.
        """
        # bool indexes TURNS: True (1) -> WHITE when it is BLACK's turn.
        return self.TURNS[self._turn is self.BLACK]

    def move(self, x, y):
        """
        Makes a move at the given location for the current turn's color.

        Raises BoardError if the point is occupied, the move is suicidal,
        or the move repeats the previous position (ko).
        """
        # Check if coordinates are occupied
        if self[x, y] is not self.EMPTY:
            raise BoardError('Cannot move on top of another piece!')

        # Store history and make move
        self._push_history()
        self[x, y] = self._turn

        # Check if any pieces have been taken
        taken = self._take_pieces(x, y)

        # Check if move is suicidal. A suicidal move is a move that takes no
        # pieces and is played on a coordinate which has no liberties.
        if taken == 0:
            self._check_for_suicide(x, y)

        # Check if move is redundant. A redundant move is one that would
        # return the board to the state at the time of a player's last move.
        self._check_for_ko()

        self._flip_turn()
        # A new move invalidates the redo stack.
        self._redo = []

    def _check_for_suicide(self, x, y):
        """
        Checks if move is suicidal; rolls back and raises if so.
        """
        if self.count_liberties(x, y) == 0:
            self._pop_history()
            raise BoardError('Cannot play on location with no liberties!')

    def _check_for_ko(self):
        """
        Checks if board state is redundant (ko rule); rolls back and raises if so.
        """
        try:
            # Compare against the position two snapshots back (the state
            # before the opponent's last move).
            if self._array == self._history[-2][0]:
                self._pop_history()
                raise BoardError('Cannot make a move that is redundant!')
        except IndexError:
            # Insufficient history...let this one slide
            pass

    def _take_pieces(self, x, y):
        """
        Checks if any pieces were taken by the last move at the specified
        coordinates. If so, removes them from play and tallies resulting
        points. Returns the total number of stones captured.
        """
        scores = []
        for p, (x1, y1) in self._get_surrounding(x, y):
            # If location is opponent's color and has no liberties, tally it up
            if p is self._next_turn and self.count_liberties(x1, y1) == 0:
                score = self._kill_group(x1, y1)
                scores.append(score)
                self._tally(score)
        return sum(scores)

    def _flip_turn(self):
        """
        Iterates the turn counter and returns the new turn.
        """
        self._turn = self._next_turn
        return self._turn

    @property
    def _state(self):
        """
        Returns the game state as a named tuple (deep-copied board array,
        current turn, copied score dict).
        """
        return self.State(self.copy._array, self._turn, copy(self._score))

    def _load_state(self, state):
        """
        Loads the specified game state.
        """
        self._array, self._turn, self._score = state

    def _push_history(self):
        """
        Pushes game state onto history.
        """
        self._history.append(self._state)

    def _pop_history(self):
        """
        Pops and loads game state from history. Returns the state that was
        current before the pop, or None if history was empty.
        """
        current_state = self._state
        try:
            self._load_state(self._history.pop())
            return current_state
        except IndexError:
            return None

    def undo(self):
        """
        Undoes one move, pushing it onto the redo stack.

        Raises BoardError when there is nothing to undo.
        """
        state = self._pop_history()
        if state:
            self._redo.append(state)
            return state
        else:
            raise BoardError('No moves to undo!')

    def redo(self):
        """
        Re-applies one move that was undone.

        Raises BoardError when there is nothing to redo.
        """
        try:
            self._push_history()
            self._load_state(self._redo.pop())
        except IndexError:
            # Nothing to redo: revert the speculative history push.
            self._pop_history()
            raise BoardError('No undone moves to redo!')

    def _tally(self, score):
        """
        Adds points to the current turn's score.
        """
        self._score[self._turn] += score

    def _get_none(self, x, y):
        """
        Same thing as Array.__getitem__, but returns None if coordinates are
        not within array dimensions.
        """
        try:
            return self[x, y]
        except ArrayError:
            return None

    def _get_surrounding(self, x, y):
        """
        Gets information about the surrounding locations for a specified
        coordinate. Returns a tuple of the locations clockwise starting from
        the top.

        NOTE(review): on Python 3 this returns a lazy filter object; every
        caller iterates it exactly once, so this is safe as written.
        """
        coords = (
            (x, y - 1),
            (x + 1, y),
            (x, y + 1),
            (x - 1, y),
        )
        # Drop off-board neighbours (which _get_none reported as None).
        # Assumes Location objects are truthy -- TODO confirm in Location.
        return filter(lambda i: bool(i[0]), [
            (self._get_none(a, b), (a, b))
            for a, b in coords
        ])

    def _get_group(self, x, y, traversed):
        """
        Recursively traverses adjacent locations of the same color to find all
        locations which are members of the same group.

        `traversed` is the shared set of visited coordinates; it is mutated
        in place across the recursion.
        """
        loc = self[x, y]

        # Get surrounding locations which have the same color and whose
        # coordinates have not already been traversed
        locations = [
            (p, (a, b))
            for p, (a, b) in self._get_surrounding(x, y)
            if p is loc and (a, b) not in traversed
        ]

        # Add current coordinates to traversed coordinates
        traversed.add((x, y))

        # Find coordinates of similar neighbors
        if locations:
            return traversed.union(*[
                self._get_group(a, b, traversed)
                for _, (a, b) in locations
            ])
        else:
            return traversed

    def get_group(self, x, y):
        """
        Gets the coordinates for all locations which are members of the same
        group as the location at the given coordinates.

        Raises BoardError if the location is empty.
        """
        if self[x, y] not in self.TURNS:
            raise BoardError('Can only get group for black or white location')

        return self._get_group(x, y, set())

    def _kill_group(self, x, y):
        """
        Kills a group of black or white pieces and returns its size for
        scoring.
        """
        if self[x, y] not in self.TURNS:
            raise BoardError('Can only kill black or white group')

        group = self.get_group(x, y)
        score = len(group)

        for x1, y1 in group:
            self[x1, y1] = self.EMPTY

        return score

    def _get_liberties(self, x, y, traversed):
        """
        Recursively traverses adjacent locations of the same color to find all
        surrounding liberties for the group at the given coordinates.
        """
        loc = self[x, y]

        if loc is self.EMPTY:
            # Return coords of empty location (this counts as a liberty)
            return set([(x, y)])
        else:
            # Get surrounding locations which are empty or have the same color
            # and whose coordinates have not already been traversed
            locations = [
                (p, (a, b))
                for p, (a, b) in self._get_surrounding(x, y)
                if (p is loc or p is self.EMPTY) and (a, b) not in traversed
            ]

            # Mark current coordinates as having been traversed
            traversed.add((x, y))

            # Collect unique coordinates of surrounding liberties
            if locations:
                return set.union(*[
                    self._get_liberties(a, b, traversed)
                    for _, (a, b) in locations
                ])
            else:
                return set()

    def get_liberties(self, x, y):
        """
        Gets the coordinates for liberties surrounding the group at the given
        coordinates.
        """
        return self._get_liberties(x, y, set())

    def count_liberties(self, x, y):
        """
        Gets the number of liberties surrounding the group at the given
        coordinates.
        """
        return len(self.get_liberties(x, y))
|
|
"""
To run this test, type this in command line <kolibri manage test -- kolibri.content>
"""
import datetime
import os
import shutil
import tempfile
from django.test import TestCase
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db import connections
from django.test.utils import override_settings
from kolibri.content import models as content
from django.conf import settings
from le_utils.constants import content_kinds
from ..content_db_router import set_active_content_database, using_content_database
from ..errors import ContentModelUsedOutsideDBContext
from rest_framework.test import APITestCase
from kolibri.auth.models import DeviceOwner, Facility, FacilityUser
from kolibri.logger.models import ContentSummaryLog
CONTENT_STORAGE_DIR_TEMP = tempfile.mkdtemp()
CONTENT_DATABASE_DIR_TEMP = tempfile.mkdtemp()
@override_settings(
    CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
    CONTENT_DATABASE_DIR=CONTENT_DATABASE_DIR_TEMP,
)
class ContentNodeTestCase(TestCase):
    """
    Testcase for content metadata methods
    """
    fixtures = ['content_test.json']
    multi_db = True
    the_channel_id = 'content_test'

    # Register an in-memory sqlite database for the test channel.
    connections.databases[the_channel_id] = {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        # set the active content database for the duration of the test
        set_active_content_database(self.the_channel_id)

        # Create a temporary directory
        self.test_dir = tempfile.mkdtemp()
        # Create files in the temporary directory and write something to
        # them. The write handles are closed before the files are reopened
        # so the buffered bytes are flushed to disk -- previously the files
        # were reopened while the unflushed write handles were still open,
        # so the reads below could observe empty files, and the original
        # write handles leaked.
        with open(os.path.join(self.test_dir, 'test_1.pdf'), 'wb') as f:
            f.write(('The owls are not what they seem').encode('utf-8'))
        with open(os.path.join(self.test_dir, 'test_2.mp4'), 'wb') as f:
            f.write(('The owl are not what they seem').encode('utf-8'))

        # Reopen the files and check if what we read back is the same
        self.temp_f_1 = open(os.path.join(self.test_dir, 'test_1.pdf'))
        self.temp_f_2 = open(os.path.join(self.test_dir, 'test_2.mp4'))
        self.assertEqual(self.temp_f_1.read(), 'The owls are not what they seem')
        self.assertEqual(self.temp_f_2.read(), 'The owl are not what they seem')
        # Close the read handles now that the sanity check has passed.
        self.temp_f_1.close()
        self.temp_f_2.close()

    # Tests for content API methods

    def test_get_prerequisites_for(self):
        """
        test the directional characteristic of prerequisite relationship
        """
        c1 = content.ContentNode.objects.get(title="c1")
        root = content.ContentNode.objects.get(title="root")
        # if root is the prerequisite of c1
        expected_output = content.ContentNode.objects.filter(title__in=["root"])
        actual_output = content.ContentNode.objects.filter(prerequisite_for=c1)
        self.assertEqual(set(expected_output), set(actual_output))
        # then c1 should not be the prerequisite of root
        unexpected_output = content.ContentNode.objects.filter(title__in=["c1"])
        actual_output = content.ContentNode.objects.filter(prerequisite_for=root)
        self.assertNotEqual(set(actual_output), set(unexpected_output))

    def test_get_has_prerequisites(self):
        """
        test the directional characteristic of prerequisite relationship
        """
        c1 = content.ContentNode.objects.get(title="c1")
        root = content.ContentNode.objects.get(title="root")
        # if root is the prerequisite of c1
        expected_output = content.ContentNode.objects.filter(title__in=["c1"])
        actual_output = content.ContentNode.objects.filter(has_prerequisite=root)
        self.assertEqual(set(expected_output), set(actual_output))
        # then c1 should not be the prerequisite of root
        unexpected_output = content.ContentNode.objects.filter(title__in=["root"])
        actual_output = content.ContentNode.objects.filter(has_prerequisite=c1)
        self.assertNotEqual(set(actual_output), set(unexpected_output))

    def test_get_all_related(self):
        """
        test the nondirectional characteristic of related relationship
        """
        c1 = content.ContentNode.objects.get(title="c1")
        c2 = content.ContentNode.objects.get(title="c2")
        # if c1 is related to c2
        expected_output = content.ContentNode.objects.filter(title__in=["c2"])
        actual_output = content.ContentNode.objects.filter(related=c1)
        self.assertEqual(set(expected_output), set(actual_output))
        # then c2 should be related to c1
        expected_output = content.ContentNode.objects.filter(title__in=["c1"])
        actual_output = content.ContentNode.objects.filter(related=c2)
        self.assertEqual(set(expected_output), set(actual_output))

    def test_descendants_of_kind(self):
        # Only topic-kind descendants of root (excluding root itself).
        p = content.ContentNode.objects.get(title="root")
        expected_output = content.ContentNode.objects.filter(title__in=["c2"])
        actual_output = p.get_descendants(include_self=False).filter(kind=content_kinds.TOPIC)
        self.assertEqual(set(expected_output), set(actual_output))

    def test_get_top_level_topics(self):
        # Topic children of the (single) root node.
        p = content.ContentNode.objects.get(title="root")
        expected_output = content.ContentNode.objects.filter(parent=p, kind=content_kinds.TOPIC)
        actual_output = content.ContentNode.objects.get(parent__isnull=True).get_children().filter(kind=content_kinds.TOPIC)
        self.assertEqual(set(expected_output), set(actual_output))

    def test_all_str(self):
        # test for File __str__
        p = content.File.objects.get(id="725257a0570044acbd59f8cf6a68b2bf")
        self.assertEqual(str(p), '.mp4')
        # test for ContentTag __str__
        p = content.ContentTag.objects.get(tag_name="tag_2")
        self.assertEqual(str(p), 'tag_2')
        # test for Language __str__
        p = content.Language.objects.get(lang_code="en")
        self.assertEqual(str(p), 'en')
        # test for ChannelMetadata __str__
        p = content.ChannelMetadata.objects.get(name="testing")
        self.assertEqual(str(p), 'testing')

    def tearDown(self):
        """
        clean up files/folders created during the test
        """
        # set the active content database to None now that the test is over
        set_active_content_database(None)
        try:
            shutil.rmtree(settings.CONTENT_COPY_DIR)
            shutil.rmtree(self.test_dir)
        except Exception:
            # Best-effort cleanup: CONTENT_COPY_DIR may be undefined and the
            # directories may already be gone. (Was a bare except, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass
        super(ContentNodeTestCase, self).tearDown()
@override_settings(
    CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
    CONTENT_DATABASE_DIR=CONTENT_DATABASE_DIR_TEMP,
)
class DatabaseRoutingTests(TestCase):
    """Tests for routing ORM queries to per-channel content databases."""
    multi_db = True
    the_channel_id = 'content_test'

    # Register an in-memory sqlite database for the test channel.
    connections.databases[the_channel_id] = {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }

    def test_accessing_node_without_active_db_throws_exception(self):
        # With no active content database, model access must fail loudly.
        set_active_content_database(None)
        with self.assertRaises(ContentModelUsedOutsideDBContext):
            list(content.ContentNode.objects.all())

    def test_accessing_data_within_context_manager_works(self):
        with using_content_database(self.the_channel_id):
            list(content.ContentNode.objects.all())

    def test_accessing_data_within_decorated_function_works(self):
        # using_content_database also works as a decorator.
        @using_content_database(self.the_channel_id)
        def my_func():
            return list(content.ContentNode.objects.all())
        my_func()

    def test_accessing_nonexistent_db_raises_error(self):
        with self.assertRaises(KeyError):
            with using_content_database("nonexistent_db"):
                list(content.ContentNode.objects.all())

    def test_database_on_disk_works_too(self):
        # A channel database that exists on disk (but is not registered in
        # connections.databases) should be discovered and used.
        the_other_channel_id = 'content_test_2'
        filename = os.path.join(settings.CONTENT_DATABASE_DIR, the_other_channel_id + '.sqlite3')
        connections.databases[the_other_channel_id] = {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': filename,
        }
        call_command('migrate', database=the_other_channel_id)
        # Unregister so the router has to find the file on disk itself.
        del connections.databases[the_other_channel_id]
        with using_content_database(the_other_channel_id):
            list(content.ContentNode.objects.all())

    def test_empty_database_on_disk_throws_error(self):
        # An empty (unmigrated) database file must not be usable.
        yet_another_channel_id = 'content_test_3'
        filename = os.path.join(settings.CONTENT_DATABASE_DIR, yet_another_channel_id + '.sqlite3')
        open(filename, 'a').close()  # touch the file to create an empty DB
        with self.assertRaises(KeyError):
            with using_content_database(yet_another_channel_id):
                list(content.ContentNode.objects.all())
        del connections.databases[yet_another_channel_id]
@override_settings(
    CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
    CONTENT_DATABASE_DIR=CONTENT_DATABASE_DIR_TEMP,
)
class ContentNodeAPITestCase(APITestCase):
    """
    Testcase for content API methods
    """
    fixtures = ['content_test.json']
    multi_db = True
    the_channel_id = '15137d33c49f489ebe08893bfa6b5414'

    # Register an in-memory sqlite database for the test channel.
    connections.databases[the_channel_id] = {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }

    def setUp(self):
        # create DeviceOwner to pass the setup_wizard middleware check
        DeviceOwner.objects.create(username='test-device-owner', password=123)
        # set the active content database for the duration of the test
        set_active_content_database(self.the_channel_id)

    def _reverse_channel_url(self, pattern_name, extra_kwargs=None):
        """Helper method to reverse a URL using the current channel ID.

        extra_kwargs defaults to None instead of a mutable ``{}`` default,
        which was shared across all calls (a classic Python pitfall).
        Passing an explicit dict remains backward compatible.
        """
        kwargs = {"channel_id": self.the_channel_id}
        if extra_kwargs:
            kwargs.update(extra_kwargs)
        return reverse(pattern_name, kwargs=kwargs)

    def test_prerequisite_for_filter(self):
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"prerequisite_for": c1_id})
        self.assertEqual(response.data[0]['title'], 'root')

    def test_has_prerequisite_filter(self):
        root_id = content.ContentNode.objects.get(title="root").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"has_prerequisite": root_id})
        self.assertEqual(response.data[0]['title'], 'c1')

    def test_related_filter(self):
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"related": c1_id})
        self.assertEqual(response.data[0]['title'], 'c2')

    def test_contentnode_list(self):
        response = self.client.get(self._reverse_channel_url("contentnode-list"))
        self.assertEqual(len(response.data), 6)

    def test_contentnode_retrieve(self):
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': c1_id}))
        self.assertEqual(response.data['pk'], c1_id.__str__())

    def test_contentnode_field_filtering(self):
        c1_id = content.ContentNode.objects.get(title="c1").id
        response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': c1_id}), data={"fields": "title,description"})
        self.assertEqual(response.data['title'], "c1")
        self.assertEqual(response.data['description'], "balbla2")
        self.assertTrue("pk" not in response.data)

    def test_contentnode_recommendations(self):
        root_id = content.ContentNode.objects.get(title="root").id
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"recommendations_for": root_id})
        self.assertEqual(len(response.data), 4)

    def test_channelmetadata_list(self):
        data = content.ChannelMetadata.objects.values()[0]
        content.ChannelMetadataCache.objects.create(**data)
        response = self.client.get(reverse("channel-list", kwargs={}))
        self.assertEqual(response.data[0]['name'], 'testing')

    def test_channelmetadata_retrieve(self):
        data = content.ChannelMetadata.objects.values()[0]
        content.ChannelMetadataCache.objects.create(**data)
        response = self.client.get(reverse("channel-detail", kwargs={'pk': data["id"]}))
        self.assertEqual(response.data['name'], 'testing')

    def test_file_list(self):
        response = self.client.get(self._reverse_channel_url("file-list"))
        self.assertEqual(len(response.data), 5)

    def test_file_retrieve(self):
        response = self.client.get(self._reverse_channel_url("file-detail", {'pk': "9f9438fe6b0d42dd8e913d7d04cfb2b1"}))
        self.assertEqual(response.data['preset'], 'High Resolution')

    def test_contentnode_progress(self):
        # set up data for testing progress_fraction field on content node endpoint
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="learner", facility=facility)
        user.set_password("pass")
        user.save()

        root = content.ContentNode.objects.get(title="root")
        c1 = content.ContentNode.objects.get(title="c1")
        c2 = content.ContentNode.objects.get(title="c2")
        c2c1 = content.ContentNode.objects.get(title="c2c1")
        c2c3 = content.ContentNode.objects.get(title="c2c3")

        for node, progress in [(c2c1, 0.7), (c2c3, 0.5)]:
            ContentSummaryLog.objects.create(
                user=user,
                content_id=node.content_id,
                progress=progress,
                channel_id=self.the_channel_id,
                start_timestamp=datetime.datetime.now()
            )

        def assert_progress(node, progress):
            response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': node.id}))
            self.assertEqual(response.data["progress_fraction"], progress)

        # check that there is no progress when not logged in
        assert_progress(root, 0)
        assert_progress(c1, 0)
        assert_progress(c2, 0)
        assert_progress(c2c1, 0)

        # check that progress is calculated appropriately when user is logged in
        self.client.login(username="learner", password="pass", facility=facility)
        assert_progress(root, 0.3)
        assert_progress(c1, 0)
        assert_progress(c2, 0.4)
        assert_progress(c2c1, 0.7)

    def tearDown(self):
        """
        clean up files/folders created during the test
        """
        # set the active content database to None now that the test is over
        set_active_content_database(None)
        super(ContentNodeAPITestCase, self).tearDown()
|
|
"""
Custom storage for django with Mosso Cloud Files backend.
Created by Rich Leland <rich@richleland.com>.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from django.core.files.storage import Storage
from django.utils.text import get_valid_filename
try:
import cloudfiles
from cloudfiles.errors import NoSuchObject
except ImportError:
raise ImproperlyConfigured("Could not load cloudfiles dependency. See "
"http://www.mosso.com/cloudfiles.jsp.")
# TODO: implement TTL into cloudfiles methods
# Time-to-live in seconds for Cloud Files objects; can be overridden via
# settings.CLOUDFILES_TTL. (Currently unused -- see TODO above.)
CLOUDFILES_TTL = getattr(settings, 'CLOUDFILES_TTL', 600)
def cloudfiles_upload_to(self, filename):
    """
    Simple, custom upload_to because Cloud Files doesn't support
    nested containers (directories).

    Flattens the path to a single sanitized filename.

    Actually found this out from @minter:
    @richleland The Cloud Files APIs do support pseudo-subdirectories, by
    creating zero-byte files with type application/directory.

    May implement in a future version.
    """
    return get_valid_filename(filename)
class CloudFilesStorage(Storage):
    """
    Custom storage for Mosso Cloud Files.

    The connection and container are created lazily on first use and
    cached on the instance (see the _get_*/_set_* property pairs below).
    """
    default_quick_listdir = True

    def __init__(self, username=None, api_key=None, container=None,
                 connection_kwargs=None):
        """
        Initialize the settings for the connection and container.

        Each argument falls back to the corresponding CLOUDFILES_* Django
        setting when not given.
        """
        self.username = username or settings.CLOUDFILES_USERNAME
        self.api_key = api_key or settings.CLOUDFILES_API_KEY
        self.container_name = container or settings.CLOUDFILES_CONTAINER
        self.connection_kwargs = connection_kwargs or {}

    def __getstate__(self):
        """
        Return a picklable representation of the storage.

        Deliberately excludes the live _connection/_container objects so
        they are re-created lazily after unpickling.
        """
        return dict(username=self.username,
                    api_key=self.api_key,
                    container_name=self.container_name,
                    connection_kwargs=self.connection_kwargs)

    def _get_connection(self):
        # Lazily open and cache the Cloud Files connection.
        if not hasattr(self, '_connection'):
            self._connection = cloudfiles.get_connection(self.username,
                                self.api_key, **self.connection_kwargs)
        return self._connection

    def _set_connection(self, value):
        self._connection = value

    connection = property(_get_connection, _set_connection)

    def _get_container(self):
        # Lazily fetch the container. NOTE: assigns through the `container`
        # property setter on purpose, so the container is made public.
        if not hasattr(self, '_container'):
            self.container = self.connection.get_container(
                self.container_name)
        return self._container

    def _set_container(self, container):
        """
        Set the container, making it publicly available (on Limelight CDN) if
        it is not already.
        """
        if not container.is_public():
            container.make_public()
        # Invalidate the cached public URI for the previous container.
        if hasattr(self, '_container_public_uri'):
            delattr(self, '_container_public_uri')
        self._container = container

    container = property(_get_container, _set_container)

    def _get_container_url(self):
        # Cache the container's public URI after the first lookup.
        if not hasattr(self, '_container_public_uri'):
            self._container_public_uri = self.container.public_uri()
        return self._container_public_uri

    container_url = property(_get_container_url)

    def _get_cloud_obj(self, name):
        """
        Helper function to get retrieve the requested Cloud Files Object.
        """
        return self.container.get_object(name)

    def _open(self, name, mode='rb'):
        """
        Return the CloudFilesStorageFile.

        NOTE(review): `mode` is accepted for Storage API compatibility but
        ignored; remote objects are effectively read-only streams here.
        """
        return CloudFilesStorageFile(storage=self, name=name)

    def _save(self, name, content):
        """
        Use the Cloud Files service to write ``content`` to a remote file
        (called ``name``).
        """
        cloud_obj = self.container.create_object(name)
        cloud_obj.size = content.size
        content.open()
        # If the content type is available, pass it in directly rather than
        # getting the cloud object to try to guess.
        if hasattr(content.file, 'content_type'):
            cloud_obj.content_type = content.file.content_type
        cloud_obj.send(content)
        content.close()
        return name

    def delete(self, name):
        """
        Deletes the specified file from the storage system.
        """
        self.container.delete_object(name)

    def exists(self, name):
        """
        Returns True if a file referenced by the given name already exists in
        the storage system, or False if the name is available for a new file.
        """
        try:
            self._get_cloud_obj(name)
            return True
        except NoSuchObject:
            return False

    def listdir(self, path):
        """
        Lists the contents of the specified path, returning a 2-tuple; the
        first being an empty list of directories (not available for quick-
        listing), the second being a list of filenames.

        If the list of directories is required, use the full_listdir method.
        """
        files = []
        if path and not path.endswith('/'):
            path = '%s/' % path
        path_len = len(path)
        for name in self.container.list_objects(path=path):
            # Strip the path prefix so only the entry name remains.
            files.append(name[path_len:])
        return ([], files)

    def full_listdir(self, path):
        """
        Lists the contents of the specified path, returning a 2-tuple of lists;
        the first item being directories, the second item being files.

        On large containers, this may be a slow operation for root containers
        because every single object must be returned (cloudfiles does not
        provide an explicit way of listing directories).
        """
        dirs = set()
        files = []
        if path and not path.endswith('/'):
            path = '%s/' % path
        path_len = len(path)
        for name in self.container.list_objects(prefix=path):
            name = name[path_len:]
            # A '/' in the interior of the name means a pseudo-directory;
            # slicing [1:-1] ignores leading/trailing slashes.
            slash = name[1:-1].find('/') + 1
            if slash:
                dirs.add(name[:slash])
            elif name:
                files.append(name)
        dirs = list(dirs)
        dirs.sort()
        return (dirs, files)

    def size(self, name):
        """
        Returns the total size, in bytes, of the file specified by name.
        """
        return self._get_cloud_obj(name).size

    def url(self, name):
        """
        Returns an absolute URL where the file's contents can be accessed
        directly by a web browser.
        """
        return '%s/%s' % (self.container_url, name)
class CloudFilesStorageFile(File):
    """File-like wrapper around a remote Cloud Files object.

    The remote object is fetched lazily on first access and cached; a
    local read position is tracked because the remote API reads by
    explicit offset.

    The former class attribute ``closed = False`` was removed: it was dead
    code, immediately shadowed by the ``closed`` property defined later in
    the class body.
    """

    def __init__(self, storage, name, *args, **kwargs):
        self._storage = storage
        # Initialize the read position here so read() works even when the
        # caller never calls open() first (previously _pos was only set in
        # open(), so an early read() raised AttributeError).
        self._pos = 0
        super(CloudFilesStorageFile, self).__init__(file=None, name=name,
                                                    *args, **kwargs)

    def _get_size(self):
        # Lazily look up (and cache) the remote object's size.
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self.name)
        return self._size

    def _set_size(self, size):
        self._size = size

    size = property(_get_size, _set_size)

    def _get_file(self):
        # Lazily fetch (and cache) the remote Cloud Files object.
        if not hasattr(self, '_file'):
            self._file = self._storage._get_cloud_obj(self.name)
        return self._file

    def _set_file(self, value):
        # Setting to None drops the cached object so it is re-fetched on
        # next access; any other value replaces the cache directly.
        if value is None:
            if hasattr(self, '_file'):
                del self._file
        else:
            self._file = value

    file = property(_get_file, _set_file)

    def read(self, num_bytes=None):
        """Read up to num_bytes from the current position (all if None)."""
        data = self.file.read(size=num_bytes or -1, offset=self._pos)
        self._pos += len(data)
        return data

    def open(self, *args, **kwargs):
        """
        Open the cloud file object and rewind the read position.
        """
        self.file
        self._pos = 0

    def close(self, *args, **kwargs):
        # NOTE(review): only rewinds; the cached remote object is kept, so
        # the `closed` property stays False after close() -- confirm whether
        # callers rely on that before changing it.
        self._pos = 0

    @property
    def closed(self):
        # "Closed" means the remote object has not been fetched (or was
        # dropped via `self.file = None`).
        return not hasattr(self, '_file')

    def seek(self, pos):
        """Move the local read position to the given absolute offset."""
        self._pos = pos
|
|
#!/usr/bin/env python
import sys
import tempfile
import time
import os
import subprocess
import argparse
import logging
from contextlib import contextmanager
import uuid
# Module-level logger for this script.
log = logging.getLogger("serviced-tests")

# Absolute path of the serviced source tree (the directory containing this script).
SERVICED_ROOT = os.path.dirname(os.path.abspath(__file__))

# Sink for subprocess output we want to discard; kept open for the
# lifetime of the process.
DEVNULL = open(os.devnull, 'w')
def fail(msg):
    """Log *msg* as critical and abort the process with exit status 1."""
    log.critical(msg)
    # Equivalent to sys.exit(1): sys.exit simply raises SystemExit.
    raise SystemExit(1)
def which(prog):
    """Return the filesystem path of *prog* as reported by `which`.

    Raises subprocess.CalledProcessError when the program is not on PATH.
    The command is passed as an argument list (no shell), so program names
    containing shell metacharacters can no longer inject commands -- the
    previous implementation interpolated *prog* into a shell string.
    """
    return subprocess.check_output(["which", prog]).strip()
@contextmanager
def elastic_server(port):
    """Run a throwaway elasticsearch container for the duration of the block.

    Publishes the container's HTTP port on *port*, sleeps briefly to let it
    come up, yields nothing, and always attempts to stop the container on
    exit.
    """
    # Assign before the try so the finally block can tell whether a
    # container was ever started. Previously a failure before the name was
    # bound raised NameError in the finally clause, masking the original
    # exception.
    container_name = None
    try:
        log.info("Starting elastic on port %d " % port)
        container_name = str(uuid.uuid4())
        # TODO: handle already started
        # TODO: Get image name from serviced binary or isvcs.go
        # TODO: Wait for start more betterly
        master_nodes = node_name = "elasticsearch-serviced"
        cluster_name = str(uuid.uuid4())
        cmd = ["docker", "run", "-d", "--name", container_name, "--user", "1001:1001",
               "--env", "ES_JAVA_HOME=/opt/elasticsearch-serviced/jdk",
               "-p", "%d:9200" % port, "zenoss/serviced-isvcs:v71",
               "sh", "-c", "/opt/elasticsearch-serviced/bin/elasticsearch",
               "-E", "cluster.initial_master_nodes=%s" % master_nodes,
               "-E", "node.name=%s" % node_name,
               "-E", "cluster.name=%s" % cluster_name]
        subprocess.call(cmd)
        time.sleep(10)
        yield
    finally:
        if container_name is not None:
            log.info("Stopping elastic")
            subprocess.call(["docker", "stop", container_name])
@contextmanager
def dummy(*args, **kwargs):
    """No-op stand-in for a fixture context manager; accepts and ignores
    whatever arguments the real fixture takes."""
    yield
def ensure_tool(executable, importpath):
    """Return the path to *executable*, installing it with `go get` first
    if it is not already on PATH."""
    try:
        return which(executable)
    except subprocess.CalledProcessError:
        # Not on PATH yet -- fall through to install it.
        pass
    log.info("Installing %s tool" % executable)
    subprocess.call(["go", "get", importpath])
    return which(executable)
def has_dm_deferred_remove():
    """
    Test whether libdevmapper.h is new enough to support deferred remove
    functionality by compiling a file to see if the function is defined.
    """
    # The shell snippet exits 0 (no exception) only when gcc exists AND the
    # probe program fails to compile, i.e. dm_task_deferred_remove is
    # missing -> return False. If the probe compiles (or gcc is absent),
    # check_call raises and we return True.
    # NOTE(review): "no gcc -> True" assumes support when we cannot probe.
    cmd = """
    command -v gcc && ! (
    cat <<EOF | gcc -ldevmapper -xc -
    #include <libdevmapper.h>
    int main() { dm_task_deferred_remove(NULL); }
    EOF
    )
    """
    try:
        subprocess.check_call(cmd, shell=True, stdout=DEVNULL, stderr=subprocess.STDOUT)
        return False
    except subprocess.CalledProcessError:
        return True
def args():
    """Build the CLI parser and return the parsed arguments namespace."""
    p = argparse.ArgumentParser("serviced-tests")
    p.add_argument("-v", "--verbose", action="store_true", help="verbose logging")

    # Which test suites to run.
    suite_group = p.add_argument_group("Test Type")
    suite_group.add_argument("--unit", action="store_true", help="pass the 'unit' build tag")
    suite_group.add_argument("--integration", action="store_true", help="pass the 'integration' build tag")

    # Modifiers for how the tests are run.
    run_group = p.add_argument_group("Test Options")
    run_group.add_argument("--quick", action="store_true", help="don't run tests with the '!quick' build constraint")
    run_group.add_argument("--root", action="store_true", help="run the tests as the root user")
    run_group.add_argument("--race", action="store_true", help="run tests with race detection")
    run_group.add_argument("--cover", action="store_true", help="run tests with coverage")
    run_group.add_argument("--tag", action="append", help="optional extra build tag (may be specified multiple times)")
    run_group.add_argument("--include_vendor", action="store_true", dest="include_vendor", help="run tests against the vendor directory")

    # Coverage report outputs.
    cover_group = p.add_argument_group("Coverage Options")
    cover_group.add_argument("--cover-html", required=False, help="output file for HTML coverage report")
    cover_group.add_argument("--cover-xml", required=False, help="output file for Cobertura coverage report")

    # External services the tests may need.
    fixture_group = p.add_argument_group("Fixture Options")
    fixture_group.add_argument("--elastic", action="store_true", help="start an elastic server before the test run")
    fixture_group.add_argument("--elastic-port", type=int, help="elastic server port", default=9202)

    p.add_argument("--packages", nargs="*", help="serviced packages to test, relative to the serviced root (defaults to ./...)")
    p.add_argument("arguments", nargs=argparse.REMAINDER, help="optional arguments to be passed through to the test runner")
    return p.parse_args()
def build_tags(options):
    """Assemble the Go build tags for this test run.

    Starts from any user-supplied ``--tag`` values, always adds the
    mandatory ``daemon`` tag, then appends conditional tags for the
    selected test types and options.

    Args:
        options: parsed namespace returned by ``args()``.

    Returns:
        list: build tags to join and pass to ``go test -tags``.
    """
    # Copy so the appends below do not mutate options.tag in place
    # (the original aliased the parsed list directly).
    tags = list(options.tag or [])
    # We always need the daemon tag
    tags.append("daemon")
    # Older libdevmapper builds lack deferred-remove support.
    if not has_dm_deferred_remove():
        tags.append("libdm_no_deferred_remove")
    if options.unit:
        tags.append("unit")
    if options.integration:
        tags.append("integration")
    if options.quick:
        tags.append("quick")
    if options.root:
        tags.append("root")
    # Lazy %-style args: formatting is skipped when DEBUG is disabled.
    log.debug("Using build tags: %s", tags)
    return tags
def main(options):
    """Run the selected serviced test suites.

    Builds a ``go test`` command line from *options* (build tags, race
    detection, coverage, package list, pass-through runner arguments),
    optionally wraps it in ``sudo`` and/or an elastic fixture, executes
    it, and converts coverage output when requested.

    Exits the process with status 1 on test failure or interrupt.
    """
    logging.basicConfig(level=logging.DEBUG if options.verbose else logging.INFO)

    if not any((options.unit, options.integration)):
        fail("No tests were specified to run. Please pass at least one of --unit or --integration.")

    log.debug("Running tests under serviced in %s", SERVICED_ROOT)

    # NOTE(review): this mutates the live process environment (no copy);
    # ensure_tool()/which() and the child process both see the changes —
    # confirm before switching to os.environ.copy().
    env = os.environ
    env["SERVICED_HOME"] = SERVICED_ROOT
    # Set environment variable for the serviced log directory.
    env["SERVICED_LOG_PATH"] = os.path.join(SERVICED_ROOT, "var_log_serviced")
    # Unset EDITOR so CLI tests won't fail
    env.pop("EDITOR", None)

    tags = build_tags(options)

    # Extra kwargs for subprocess.check_call. Renamed from 'args',
    # which shadowed the module-level args() function.
    run_kwargs = {}
    if options.cover:
        if options.race:
            fail("--race and --cover are mutually exclusive.")
        runner = ensure_tool("gocov", "github.com/axw/gocov/gocov")
        log.debug("Using gocov executable %s", runner)
        if options.cover_html:
            ensure_tool("gocov-html", "github.com/matm/gocov-html")
        if options.cover_xml:
            ensure_tool("gocov-xml", "github.com/AlekSi/gocov-xml")
        # gocov JSON goes to a temp file so it can be converted below.
        stdout = tempfile.NamedTemporaryFile()
        log.debug("Writing temporary coverage output to %s", stdout.name)
        run_kwargs["stdout"] = stdout
    else:
        runner = which("go")
        log.debug("Using go executable %s", runner)

    # TODO: Get a sudo session set up with an interactive proc
    cmd = ["sudo", "-E", "PATH=%s" % env["PATH"]] if options.root else []
    cmd.extend([runner, "test", "-tags", " ".join(tags)])

    # -p 1 serializes package test binaries (required for integration
    # runs without coverage, and for race runs).
    usep1 = False
    if options.integration:
        if options.cover:
            env["GOMAXPROCS"] = "1"
        else:
            usep1 = True

    if options.race:
        log.debug("Running with race detection")
        env["GORACE"] = "history_size=7 halt_on_error=1"
        cmd.append("-race")
        usep1 = True

    if usep1:
        cmd.extend(['-p', '1'])

    packages = options.packages
    if not packages:
        if options.include_vendor:
            # BUG FIX: was the bare string "./...", which cmd.extend()
            # iterated character-by-character; wrap it in a list.
            packages = ["./..."]
        else:
            # NOTE(review): on Python 3 check_output() returns bytes, so
            # these entries would be bytes — confirm this script targets
            # Python 2 before porting.
            packages = subprocess.check_output("go list ./... | grep -v vendor", shell=True).splitlines()
    cmd.extend(packages)

    # Pass everything after a literal "--" straight to the test runner.
    passthru = options.arguments
    if passthru and passthru[0] == "--":
        passthru = passthru[1:]
    cmd.extend(passthru)

    log.debug("Running command: %s", cmd)
    log.debug("Running in directory: %s", SERVICED_ROOT)

    # Spin up elastic only when requested; dummy is a no-op fixture.
    fixture = elastic_server if options.elastic else dummy
    with fixture(options.elastic_port):
        try:
            subprocess.check_call(
                cmd,
                env=env,
                cwd=SERVICED_ROOT,
                **run_kwargs
            )
        except (subprocess.CalledProcessError, KeyboardInterrupt):
            sys.exit(1)
        if options.cover_html:
            log.debug("Converting coverage to HTML")
            with open(options.cover_html, 'w') as output:
                subprocess.call(["gocov-html", stdout.name], stdout=output)
            log.info("HTML output written to %s", options.cover_html)
        if options.cover_xml:
            log.debug("Converting coverage to Cobertura XML")
            with open(options.cover_xml, 'w') as output:
                # gocov-xml reads the JSON report from stdin.
                proc = subprocess.Popen(["gocov-xml", stdout.name], stdout=output, stdin=subprocess.PIPE)
                stdout.seek(0)
                proc.communicate(stdout.read())
            log.info("Cobertura output written to %s", options.cover_xml)
# Script entry point: parse CLI flags, then run the selected suites.
if __name__ == "__main__":
    options = args()
    main(options)
|
|
"""The tests for the MQTT light platform.
Configuration for RGB Version with brightness:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for XY Version with brightness:
light:
platform: mqtt
name: "Office Light XY"
state_topic: "office/xy1/light/status"
command_topic: "office/xy1/light/switch"
brightness_state_topic: "office/xy1/brightness/status"
brightness_command_topic: "office/xy1/brightness/set"
xy_state_topic: "office/xy1/xy/status"
xy_command_topic: "office/xy1/xy/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB and brightness:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with brightness and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and color temp
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
color_temp_state_topic: "office/rgb1/color_temp/status"
color_temp_command_topic: "office/rgb1/color_temp/set"
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and effect
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
effect_state_topic: "office/rgb1/effect/status"
effect_command_topic: "office/rgb1/effect/set"
effect_list:
- rainbow
- colorloop
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with white value and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
white_value_state_topic: "office/rgb1/white_value/status"
white_value_command_topic: "office/rgb1/white_value/set"
white_value_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with RGB command template:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_command_template: "{{ '#%02x%02x%02x' | format(red, green, blue)}}"
qos: 0
payload_on: "on"
payload_off: "off"
"""
import unittest
from unittest import mock
from homeassistant.setup import setup_component
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ASSUMED_STATE
import homeassistant.components.light as light
from tests.common import (
assert_setup_component, get_test_home_assistant, mock_mqtt_component,
fire_mqtt_message)
class TestLightMQTT(unittest.TestCase):
    """Test the MQTT light.

    Each test builds a light.mqtt config, fires simulated MQTT state
    messages, and asserts the resulting entity state/attributes; the
    mocked publisher records outgoing commands.
    """

    # pylint: disable=invalid-name
    def setUp(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Mocked MQTT component; records published messages.
        self.mock_publish = mock_mqtt_component(self.hass)

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_fail_setup_if_no_command_topic(self):
        """Test if command fails with command topic."""
        # Expect zero platforms to load: command_topic is required.
        with assert_setup_component(0, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt',
                    'name': 'test',
                }
            })
        self.assertIsNone(self.hass.states.get('light.test'))

    def test_no_color_brightness_color_temp_white_xy_if_no_topics(self): \
            # pylint: disable=invalid-name
        """Test if there is no color and brightness if no topic."""
        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt',
                    'name': 'test',
                    'state_topic': 'test_light_rgb/status',
                    'command_topic': 'test_light_rgb/set',
                }
            })

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('color_temp'))
        self.assertIsNone(state.attributes.get('white_value'))
        self.assertIsNone(state.attributes.get('xy_color'))

        # Turning on must not invent color/brightness attributes.
        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('color_temp'))
        self.assertIsNone(state.attributes.get('white_value'))
        self.assertIsNone(state.attributes.get('xy_color'))

    def test_controlling_state_via_topic(self): \
            # pylint: disable=invalid-name
        """Test the controlling of the state via topic."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'state_topic': 'test_light_rgb/status',
            'command_topic': 'test_light_rgb/set',
            'brightness_state_topic': 'test_light_rgb/brightness/status',
            'brightness_command_topic': 'test_light_rgb/brightness/set',
            'rgb_state_topic': 'test_light_rgb/rgb/status',
            'rgb_command_topic': 'test_light_rgb/rgb/set',
            'color_temp_state_topic': 'test_light_rgb/color_temp/status',
            'color_temp_command_topic': 'test_light_rgb/color_temp/set',
            'effect_state_topic': 'test_light_rgb/effect/status',
            'effect_command_topic': 'test_light_rgb/effect/set',
            'white_value_state_topic': 'test_light_rgb/white_value/status',
            'white_value_command_topic': 'test_light_rgb/white_value/set',
            'xy_state_topic': 'test_light_rgb/xy/status',
            'xy_command_topic': 'test_light_rgb/xy/set',
            'qos': '0',
            'payload_on': 1,
            'payload_off': 0
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('color_temp'))
        self.assertIsNone(state.attributes.get('effect'))
        self.assertIsNone(state.attributes.get('white_value'))
        self.assertIsNone(state.attributes.get('xy_color'))
        self.assertFalse(state.attributes.get(ATTR_ASSUMED_STATE))

        # Turning on with no attribute messages yet yields the defaults.
        fire_mqtt_message(self.hass, 'test_light_rgb/status', '1')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
        self.assertEqual(255, state.attributes.get('brightness'))
        self.assertEqual(150, state.attributes.get('color_temp'))
        self.assertEqual('none', state.attributes.get('effect'))
        self.assertEqual(255, state.attributes.get('white_value'))
        self.assertEqual([1, 1], state.attributes.get('xy_color'))

        fire_mqtt_message(self.hass, 'test_light_rgb/status', '0')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        fire_mqtt_message(self.hass, 'test_light_rgb/status', '1')
        self.hass.block_till_done()

        # Each attribute topic updates its attribute independently.
        fire_mqtt_message(self.hass, 'test_light_rgb/brightness/status', '100')
        self.hass.block_till_done()

        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(100, light_state.attributes['brightness'])

        fire_mqtt_message(self.hass, 'test_light_rgb/color_temp/status', '300')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(300, light_state.attributes['color_temp'])

        fire_mqtt_message(self.hass, 'test_light_rgb/effect/status', 'rainbow')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual('rainbow', light_state.attributes['effect'])

        fire_mqtt_message(self.hass, 'test_light_rgb/white_value/status',
                          '100')
        self.hass.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(100, light_state.attributes['white_value'])

        fire_mqtt_message(self.hass, 'test_light_rgb/status', '1')
        self.hass.block_till_done()

        fire_mqtt_message(self.hass, 'test_light_rgb/rgb/status',
                          '125,125,125')
        self.hass.block_till_done()

        light_state = self.hass.states.get('light.test')
        self.assertEqual([125, 125, 125],
                         light_state.attributes.get('rgb_color'))

        fire_mqtt_message(self.hass, 'test_light_rgb/xy/status',
                          '0.675,0.322')
        self.hass.block_till_done()

        light_state = self.hass.states.get('light.test')
        self.assertEqual([0.675, 0.322],
                         light_state.attributes.get('xy_color'))

    def test_brightness_controlling_scale(self):
        """Test the brightness controlling scale."""
        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt',
                    'name': 'test',
                    'state_topic': 'test_scale/status',
                    'command_topic': 'test_scale/set',
                    'brightness_state_topic': 'test_scale/brightness/status',
                    'brightness_command_topic': 'test_scale/brightness/set',
                    'brightness_scale': '99',
                    'qos': 0,
                    'payload_on': 'on',
                    'payload_off': 'off'
                }
            })

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertFalse(state.attributes.get(ATTR_ASSUMED_STATE))

        fire_mqtt_message(self.hass, 'test_scale/status', 'on')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(255, state.attributes.get('brightness'))

        fire_mqtt_message(self.hass, 'test_scale/status', 'off')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        fire_mqtt_message(self.hass, 'test_scale/status', 'on')
        self.hass.block_till_done()

        # 99 on a 0-99 scale maps to full HA brightness (255).
        fire_mqtt_message(self.hass, 'test_scale/brightness/status', '99')
        self.hass.block_till_done()

        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(255, light_state.attributes['brightness'])

    def test_white_value_controlling_scale(self):
        """Test the white_value controlling scale."""
        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, {
                light.DOMAIN: {
                    'platform': 'mqtt',
                    'name': 'test',
                    'state_topic': 'test_scale/status',
                    'command_topic': 'test_scale/set',
                    'white_value_state_topic': 'test_scale/white_value/status',
                    'white_value_command_topic': 'test_scale/white_value/set',
                    'white_value_scale': '99',
                    'qos': 0,
                    'payload_on': 'on',
                    'payload_off': 'off'
                }
            })

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('white_value'))
        self.assertFalse(state.attributes.get(ATTR_ASSUMED_STATE))

        fire_mqtt_message(self.hass, 'test_scale/status', 'on')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(255, state.attributes.get('white_value'))

        fire_mqtt_message(self.hass, 'test_scale/status', 'off')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        fire_mqtt_message(self.hass, 'test_scale/status', 'on')
        self.hass.block_till_done()

        # 99 on a 0-99 scale maps to full white value (255).
        fire_mqtt_message(self.hass, 'test_scale/white_value/status', '99')
        self.hass.block_till_done()

        light_state = self.hass.states.get('light.test')
        self.hass.block_till_done()
        self.assertEqual(255, light_state.attributes['white_value'])

    def test_controlling_state_via_topic_with_templates(self): \
            # pylint: disable=invalid-name
        """Test the setting of the state with a template."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'state_topic': 'test_light_rgb/status',
            'command_topic': 'test_light_rgb/set',
            'brightness_state_topic': 'test_light_rgb/brightness/status',
            'color_temp_state_topic': 'test_light_rgb/color_temp/status',
            'effect_state_topic': 'test_light_rgb/effect/status',
            'rgb_state_topic': 'test_light_rgb/rgb/status',
            'white_value_state_topic': 'test_light_rgb/white_value/status',
            'xy_state_topic': 'test_light_rgb/xy/status',
            'state_value_template': '{{ value_json.hello }}',
            'brightness_value_template': '{{ value_json.hello }}',
            'color_temp_value_template': '{{ value_json.hello }}',
            'effect_value_template': '{{ value_json.hello }}',
            'rgb_value_template': '{{ value_json.hello | join(",") }}',
            'white_value_template': '{{ value_json.hello }}',
            'xy_value_template': '{{ value_json.hello | join(",") }}',
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('rgb_color'))

        # Every payload is JSON; the templates pull the 'hello' key.
        fire_mqtt_message(self.hass, 'test_light_rgb/rgb/status',
                          '{"hello": [1, 2, 3]}')
        fire_mqtt_message(self.hass, 'test_light_rgb/status',
                          '{"hello": "ON"}')
        fire_mqtt_message(self.hass, 'test_light_rgb/brightness/status',
                          '{"hello": "50"}')
        fire_mqtt_message(self.hass, 'test_light_rgb/color_temp/status',
                          '{"hello": "300"}')
        fire_mqtt_message(self.hass, 'test_light_rgb/effect/status',
                          '{"hello": "rainbow"}')
        fire_mqtt_message(self.hass, 'test_light_rgb/white_value/status',
                          '{"hello": "75"}')
        fire_mqtt_message(self.hass, 'test_light_rgb/xy/status',
                          '{"hello": [0.123,0.123]}')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(50, state.attributes.get('brightness'))
        self.assertEqual([1, 2, 3], state.attributes.get('rgb_color'))
        self.assertEqual(300, state.attributes.get('color_temp'))
        self.assertEqual('rainbow', state.attributes.get('effect'))
        self.assertEqual(75, state.attributes.get('white_value'))
        self.assertEqual([0.123, 0.123], state.attributes.get('xy_color'))

    def test_sending_mqtt_commands_and_optimistic(self): \
            # pylint: disable=invalid-name
        """Test the sending of command in optimistic mode."""
        # No state topics -> optimistic (assumed state) mode.
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'command_topic': 'test_light_rgb/set',
            'brightness_command_topic': 'test_light_rgb/brightness/set',
            'rgb_command_topic': 'test_light_rgb/rgb/set',
            'color_temp_command_topic': 'test_light_rgb/color_temp/set',
            'effect_command_topic': 'test_light_rgb/effect/set',
            'white_value_command_topic': 'test_light_rgb/white_value/set',
            'xy_command_topic': 'test_light_rgb/xy/set',
            'qos': 2,
            'payload_on': 'on',
            'payload_off': 'off'
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE))

        light.turn_on(self.hass, 'light.test')
        self.hass.block_till_done()

        self.assertEqual(('test_light_rgb/set', 'on', 2, False),
                         self.mock_publish.mock_calls[-2][1])
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)

        light.turn_off(self.hass, 'light.test')
        self.hass.block_till_done()

        self.assertEqual(('test_light_rgb/set', 'off', 2, False),
                         self.mock_publish.mock_calls[-2][1])
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        self.mock_publish.reset_mock()
        light.turn_on(self.hass, 'light.test',
                      brightness=50, xy_color=[0.123, 0.123])
        light.turn_on(self.hass, 'light.test', rgb_color=[75, 75, 75],
                      white_value=80)
        self.hass.block_till_done()

        self.mock_publish().async_publish.assert_has_calls([
            mock.call('test_light_rgb/set', 'on', 2, False),
            mock.call('test_light_rgb/rgb/set', '75,75,75', 2, False),
            mock.call('test_light_rgb/brightness/set', 50, 2, False),
            mock.call('test_light_rgb/white_value/set', 80, 2, False),
            mock.call('test_light_rgb/xy/set', '0.123,0.123', 2, False),
        ], any_order=True)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual((75, 75, 75), state.attributes['rgb_color'])
        self.assertEqual(50, state.attributes['brightness'])
        self.assertEqual(80, state.attributes['white_value'])
        self.assertEqual((0.123, 0.123), state.attributes['xy_color'])

    def test_sending_mqtt_rgb_command_with_template(self):
        """Test the sending of RGB command with template."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'command_topic': 'test_light_rgb/set',
            'rgb_command_topic': 'test_light_rgb/rgb/set',
            'rgb_command_template': '{{ "#%02x%02x%02x" | '
                                    'format(red, green, blue)}}',
            'payload_on': 'on',
            'payload_off': 'off',
            'qos': 0
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        light.turn_on(self.hass, 'light.test', rgb_color=[255, 255, 255])
        self.hass.block_till_done()

        # Template renders the RGB tuple as a hex color string.
        self.mock_publish().async_publish.assert_has_calls([
            mock.call('test_light_rgb/set', 'on', 0, False),
            mock.call('test_light_rgb/rgb/set', '#ffffff', 0, False),
        ], any_order=True)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual((255, 255, 255), state.attributes['rgb_color'])

    def test_show_brightness_if_only_command_topic(self):
        """Test the brightness if only a command topic is present."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'brightness_command_topic': 'test_light_rgb/brightness/set',
            'command_topic': 'test_light_rgb/set',
            'state_topic': 'test_light_rgb/status',
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('brightness'))

        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(255, state.attributes.get('brightness'))

    def test_show_color_temp_only_if_command_topic(self):
        """Test the color temp only if a command topic is present."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'color_temp_command_topic': 'test_light_rgb/brightness/set',
            'command_topic': 'test_light_rgb/set',
            'state_topic': 'test_light_rgb/status'
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('color_temp'))

        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(150, state.attributes.get('color_temp'))

    def test_show_effect_only_if_command_topic(self):
        """Test the effect only if a command topic is present."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'effect_command_topic': 'test_light_rgb/effect/set',
            'command_topic': 'test_light_rgb/set',
            'state_topic': 'test_light_rgb/status'
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('effect'))

        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual('none', state.attributes.get('effect'))

    def test_show_white_value_if_only_command_topic(self):
        """Test the white_value if only a command topic is present."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'white_value_command_topic': 'test_light_rgb/white_value/set',
            'command_topic': 'test_light_rgb/set',
            'state_topic': 'test_light_rgb/status',
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('white_value'))

        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(255, state.attributes.get('white_value'))

    def test_show_xy_if_only_command_topic(self):
        """Test the xy if only a command topic is present."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'xy_command_topic': 'test_light_rgb/xy/set',
            'command_topic': 'test_light_rgb/set',
            'state_topic': 'test_light_rgb/status',
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('xy_color'))

        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.block_till_done()

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual([1, 1], state.attributes.get('xy_color'))

    def test_on_command_first(self):
        """Test on command being sent before brightness."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'command_topic': 'test_light/set',
            'brightness_command_topic': 'test_light/bright',
            'on_command_type': 'first',
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        light.turn_on(self.hass, 'light.test', brightness=50)
        self.hass.block_till_done()

        # Should get the following MQTT messages.
        #    test_light/set: 'ON'
        #    test_light/bright: 50
        self.assertEqual(('test_light/set', 'ON', 0, False),
                         self.mock_publish.mock_calls[-4][1])
        self.assertEqual(('test_light/bright', 50, 0, False),
                         self.mock_publish.mock_calls[-2][1])

        light.turn_off(self.hass, 'light.test')
        self.hass.block_till_done()

        self.assertEqual(('test_light/set', 'OFF', 0, False),
                         self.mock_publish.mock_calls[-2][1])

    def test_on_command_last(self):
        """Test on command being sent after brightness."""
        # 'last' is the default on_command_type.
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'command_topic': 'test_light/set',
            'brightness_command_topic': 'test_light/bright',
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        light.turn_on(self.hass, 'light.test', brightness=50)
        self.hass.block_till_done()

        # Should get the following MQTT messages.
        #    test_light/bright: 50
        #    test_light/set: 'ON'
        self.assertEqual(('test_light/bright', 50, 0, False),
                         self.mock_publish.mock_calls[-4][1])
        self.assertEqual(('test_light/set', 'ON', 0, False),
                         self.mock_publish.mock_calls[-2][1])

        light.turn_off(self.hass, 'light.test')
        self.hass.block_till_done()

        self.assertEqual(('test_light/set', 'OFF', 0, False),
                         self.mock_publish.mock_calls[-2][1])

    def test_on_command_brightness(self):
        """Test on command being sent as only brightness."""
        config = {light.DOMAIN: {
            'platform': 'mqtt',
            'name': 'test',
            'command_topic': 'test_light/set',
            'brightness_command_topic': 'test_light/bright',
            'rgb_command_topic': "test_light/rgb",
            'on_command_type': 'brightness',
        }}

        with assert_setup_component(1, light.DOMAIN):
            assert setup_component(self.hass, light.DOMAIN, config)

        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)

        # Turn on w/ no brightness - should set to max
        light.turn_on(self.hass, 'light.test')
        self.hass.block_till_done()

        # Should get the following MQTT messages.
        #    test_light/bright: 255
        self.assertEqual(('test_light/bright', 255, 0, False),
                         self.mock_publish.mock_calls[-2][1])

        light.turn_off(self.hass, 'light.test')
        self.hass.block_till_done()

        self.assertEqual(('test_light/set', 'OFF', 0, False),
                         self.mock_publish.mock_calls[-2][1])

        # Turn on w/ brightness
        light.turn_on(self.hass, 'light.test', brightness=50)
        self.hass.block_till_done()

        self.assertEqual(('test_light/bright', 50, 0, False),
                         self.mock_publish.mock_calls[-2][1])

        light.turn_off(self.hass, 'light.test')
        self.hass.block_till_done()

        # Turn on w/ just a color to insure brightness gets
        # added and sent.
        light.turn_on(self.hass, 'light.test', rgb_color=[75, 75, 75])
        self.hass.block_till_done()

        self.assertEqual(('test_light/rgb', '75,75,75', 0, False),
                         self.mock_publish.mock_calls[-4][1])
        self.assertEqual(('test_light/bright', 50, 0, False),
                         self.mock_publish.mock_calls[-2][1])
|
|
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class Callbacks(unittest.TestCase):
functype = CFUNCTYPE
## def tearDown(self):
## import gc
## gc.collect()
def callback(self, *args):
self.got_args = args
return args[-1]
def check_type(self, typ, arg):
PROTO = self.functype.im_func(typ, typ)
result = PROTO(self.callback)(arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (arg,))
self.assertEqual(result, arg)
PROTO = self.functype.im_func(typ, c_byte, typ)
result = PROTO(self.callback)(-3, arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (-3, arg))
self.assertEqual(result, arg)
################
def test_byte(self):
self.check_type(c_byte, 42)
self.check_type(c_byte, -42)
def test_ubyte(self):
self.check_type(c_ubyte, 42)
def test_short(self):
self.check_type(c_short, 42)
self.check_type(c_short, -42)
def test_ushort(self):
self.check_type(c_ushort, 42)
def test_int(self):
self.check_type(c_int, 42)
self.check_type(c_int, -42)
def test_uint(self):
self.check_type(c_uint, 42)
def test_long(self):
self.check_type(c_long, 42)
self.check_type(c_long, -42)
def test_ulong(self):
self.check_type(c_ulong, 42)
def test_longlong(self):
# test some 64-bit values, positive and negative
self.check_type(c_longlong, 5948291757245277467)
self.check_type(c_longlong, -5229388909784190580)
self.check_type(c_longlong, 42)
self.check_type(c_longlong, -42)
def test_ulonglong(self):
# test some 64-bit values, with and without msb set.
self.check_type(c_ulonglong, 10955412242170339782)
self.check_type(c_ulonglong, 3665885499841167458)
self.check_type(c_ulonglong, 42)
def test_float(self):
# only almost equal: double -> float -> double
import math
self.check_type(c_float, math.e)
self.check_type(c_float, -math.e)
def test_double(self):
self.check_type(c_double, 3.14)
self.check_type(c_double, -3.14)
def test_longdouble(self):
self.check_type(c_longdouble, 3.14)
self.check_type(c_longdouble, -3.14)
def test_char(self):
self.check_type(c_char, "x")
self.check_type(c_char, "a")
# disabled: would now (correctly) raise a RuntimeWarning about
# a memory leak. A callback function cannot return a non-integral
# C type without causing a memory leak.
@unittest.skip('test disabled')
def test_char_p(self):
self.check_type(c_char_p, "abc")
self.check_type(c_char_p, "def")
def test_pyobject(self):
o = ()
from sys import getrefcount as grc
for o in (), [], object():
initial = grc(o)
# This call leaks a reference to 'o'...
self.check_type(py_object, o)
before = grc(o)
# ...but this call doesn't leak any more. Where is the refcount?
self.check_type(py_object, o)
after = grc(o)
self.assertEqual((after, o), (before, o))
def test_unsupported_restype_1(self):
# Only "fundamental" result types are supported for callback
# functions, the type must have a non-NULL stgdict->setfunc.
# POINTER(c_double), for example, is not supported.
prototype = self.functype.im_func(POINTER(c_double))
# The type is checked when the prototype is called
self.assertRaises(TypeError, prototype, lambda: None)
def test_unsupported_restype_2(self):
prototype = self.functype.im_func(object)
self.assertRaises(TypeError, prototype, lambda: None)
    def test_issue_7959(self):
        # Callback objects stored on instances must not keep those instances
        # alive forever: after gc, no X instance may survive (issue #7959).
        proto = self.functype.im_func(None)

        class X(object):
            def func(self): pass
            def __init__(self):
                # X -> callback -> bound method -> X is a reference cycle;
                # the collector must be able to break it.
                self.v = proto(self.func)

        import gc
        for i in range(32):
            X()
        gc.collect()
        live = [x for x in gc.get_objects()
                if isinstance(x, X)]
        self.assertEqual(len(live), 0)
    def test_issue12483(self):
        # Regression test for issue #12483: a garbage collection triggered
        # from a __del__ while ctypes is constructing a callback object must
        # not crash the interpreter.
        import gc

        class Nasty:
            def __del__(self):
                gc.collect()

        # The Nasty default argument is collected while CFUNCTYPE builds the
        # callback, forcing a gc run mid-construction.
        CFUNCTYPE(None)(lambda x=Nasty(): None)
@need_symbol('WINFUNCTYPE')
class StdcallCallbacks(Callbacks):
    # Re-run the whole Callbacks suite using the stdcall calling convention.
    try:
        functype = WINFUNCTYPE
    except NameError:
        # Not on Windows: WINFUNCTYPE does not exist. The need_symbol
        # decorator skips this class in that case, so leaving functype
        # unset here is safe.
        pass
################################################################
class SampleCallbacksTestCase(unittest.TestCase):
    # End-to-end samples exercising Python callbacks through real C functions
    # from the _ctypes_test helper module and from libc/user32.

    def test_integrate(self):
        # Derived from some then non-working code, posted by David Foster
        dll = CDLL(_ctypes_test.__file__)

        # The function prototype called by 'integrate': double func(double);
        CALLBACK = CFUNCTYPE(c_double, c_double)

        # The integrate function itself, exposed from the _ctypes_test dll
        integrate = dll.integrate
        integrate.argtypes = (c_double, c_double, CALLBACK, c_long)
        integrate.restype = c_double

        def func(x):
            return x**2

        # Integrate x**2 over [0, 1] in 10 steps; the exact value is 1/3.
        result = integrate(0.0, 1.0, CALLBACK(func), 10)
        diff = abs(result - 1./3.)

        self.assertLess(diff, 0.01, "%s not less than 0.01" % diff)

    def test_issue_8959_a(self):
        # qsort() must be able to call back into a Python comparison function.
        from ctypes.util import find_library
        libc_path = find_library("c")
        if not libc_path:
            self.skipTest('could not find libc')
        libc = CDLL(libc_path)

        @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
        def cmp_func(a, b):
            return a[0] - b[0]

        array = (c_int * 5)(5, 1, 99, 7, 33)
        libc.qsort(array, len(array), sizeof(c_int), cmp_func)
        self.assertEqual(array[:], [1, 5, 7, 33, 99])

    @need_symbol('WINFUNCTYPE')
    def test_issue_8959_b(self):
        # EnumWindows must be able to drive a stdcall Python callback.
        # NOTE(review): only checks that enumeration completes; windowCount
        # is incremented but never asserted against.
        from ctypes.wintypes import BOOL, HWND, LPARAM
        global windowCount
        windowCount = 0

        @WINFUNCTYPE(BOOL, HWND, LPARAM)
        def EnumWindowsCallbackFunc(hwnd, lParam):
            global windowCount
            windowCount += 1
            return True #Allow windows to keep enumerating

        windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)

    def test_callback_register_int(self):
        # Issue #8275: buggy handling of callback args under Win64
        # NOTE: should be run on release builds as well
        dll = CDLL(_ctypes_test.__file__)
        CALLBACK = CFUNCTYPE(c_int, c_int, c_int, c_int, c_int, c_int)

        # All this function does is call the callback with its args squared
        func = dll._testfunc_cbk_reg_int
        func.argtypes = (c_int, c_int, c_int, c_int, c_int, CALLBACK)
        func.restype = c_int

        def callback(a, b, c, d, e):
            return a + b + c + d + e

        result = func(2, 3, 4, 5, 6, CALLBACK(callback))
        self.assertEqual(result, callback(2*2, 3*3, 4*4, 5*5, 6*6))

    def test_callback_register_double(self):
        # Issue #8275: buggy handling of callback args under Win64
        # NOTE: should be run on release builds as well
        dll = CDLL(_ctypes_test.__file__)
        CALLBACK = CFUNCTYPE(c_double, c_double, c_double, c_double,
                             c_double, c_double)

        # All this function does is call the callback with its args squared
        func = dll._testfunc_cbk_reg_double
        func.argtypes = (c_double, c_double, c_double,
                         c_double, c_double, CALLBACK)
        func.restype = c_double

        def callback(a, b, c, d, e):
            return a + b + c + d + e

        result = func(1.1, 2.2, 3.3, 4.4, 5.5, CALLBACK(callback))
        self.assertEqual(result,
                         callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5))
################################################################
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python2
# encoding: utf-8
"""
cache.py
Simple functionality to help to avoid reading in file data multiple times. Motivated by a practical use of the relatrix code.
Created by Shane O'Connor 2016.
"""
import os
import datetime
import operator
import traceback
import json
from klab import colortext
from klab.bio.rcsb import download_pdb, retrieve_pdb, download_fasta, retrieve_fasta
from klab.bio.rcsb import download_xml as download_pdbml
from klab.bio.rcsb import retrieve_xml as retrieve_pdbml
from klab.bio.pdb import PDB
from klab.bio.sifts import SIFTS
from klab.bio.sifts import retrieve_xml as retrieve_sifts_xml
from klab.bio.sifts import download_xml as download_sifts_xml
from klab.bio.pdbml import PDBML
from klab.bio.fasta import FASTA
from klab.fs.fsio import read_file, write_file
from klab.hash.CRC64 import CRC64digest
class CacheNode(object):
    '''Wrap a payload together with its last-access timestamp so that cache
       entries can be ordered from least- to most-recently used.'''

    def __init__(self, payload):
        self.t = datetime.datetime.now()  # time of insertion / last access
        self.o = payload                  # the cached object

    def get(self):
        '''Touch the node (refresh the access time) and return the payload.'''
        self.t = datetime.datetime.now()
        return self.o

    def __repr__(self):
        return '{0}: {1}'.format(self.t, self.o.__repr__()[:50])

    # Nodes order purely by access time. __cmp__ is kept for Python 2
    # compatibility; the rich comparisons below are what Python 3 uses.
    def __cmp__(self, other): return (self.t).__cmp__(other.t)
    def __gt__(self, other): return self.t > other.t
    def __ge__(self, other): return self.t >= other.t
    def __lt__(self, other): return self.t < other.t
    def __le__(self, other): return self.t <= other.t
    def __eq__(self, other): return self.t == other.t
    def __ne__(self, other): return self.t != other.t
class CacheNodeDict(dict):
    '''Dict whose item access unwraps stored CacheNode values, refreshing
       their access time via CacheNode.get() on every lookup.'''

    def __getitem__(self, k):
        node = dict.__getitem__(self, k)
        return node.get()
class BioCache(object):
    '''Class to store a cache of klab.bio objects. This can be used to avoid reading the same data in from disk over
       and over again.

       Four categories are cached (PDB, SIFTS XML, PDBML, FASTA), each as raw
       file contents plus parsed objects. Containers are CacheNodeDicts keyed
       on upper-cased PDB ID, so every read refreshes the entry's access time;
       add_node() uses those times for least-recently-used eviction.
    '''

    def __init__(self, cache_dir = None, max_capacity = None, silent = True):
        '''max_capacity is currently used to set the maximum capacity of all object lists i.e. you cannot currently set different
           max capacities for different lists.

           :param cache_dir: optional directory used to persist downloaded files.
           :param max_capacity: maximum number of entries per container (>= 1), or None for unbounded.
           :param silent: if False, cache lookups are logged via colortext.
        '''
        if cache_dir:
            assert(os.path.exists(cache_dir))
        if max_capacity is not None:  # identity test rather than '!= None'
            max_capacity = int(max_capacity)
            assert(max_capacity >= 1)

        self.cache_dir = cache_dir

        # PDB files
        self.pdb_contents = CacheNodeDict()
        self.pdb_objects = CacheNodeDict()

        # SIFTS XML files
        self.sifts_xml_contents = CacheNodeDict()
        self.sifts_objects = CacheNodeDict()

        # PDBML files
        self.pdbml_contents = CacheNodeDict()
        self.pdbml_objects = CacheNodeDict()

        # FASTA files
        self.fasta_contents = CacheNodeDict()
        self.fasta_objects = CacheNodeDict()

        self.max_capacity = max_capacity
        self.silent = silent

    def log(self, msg):
        if not self.silent:
            colortext.plightpurple(msg)

    def log_lookup(self, msg):
        self.log('CACHE LOOKUP: {0}'.format(msg))
        #self.log('CACHE LOOKUP: {0}.\n{1}'.format(msg, '\n'.join([l[:-1] for l in traceback.format_stack()])))

    def add_node(self, container, k, v):
        '''Store payload v under key k, evicting the least-recently-used
        entries first so that container never exceeds self.max_capacity.

        Fix: the previous truncation sliced with [:-(self.max_capacity - 1)],
        which is [:-0] == [:0] (i.e. an empty keys-to-delete list) when
        max_capacity == 1, so nothing was ever evicted and the container grew
        without bound. Computing the excess count explicitly handles every
        capacity >= 1.
        '''
        if self.max_capacity and (len(container) + 1) > self.max_capacity:
            # Sort by CacheNode (i.e. by last access time), oldest first, and
            # delete just enough entries to leave room for the new one.
            num_excess = len(container) - (self.max_capacity - 1)
            oldest_first = sorted(list(container.items()), key=operator.itemgetter(1))
            for dk, _ in oldest_first[:num_excess]:
                del container[dk]
        container[k] = CacheNode(v)

    ######################
    # PDB files
    ######################

    def add_pdb_contents(self, pdb_id, contents):
        self.add_node(self.pdb_contents, pdb_id.upper(), contents)

    def get_pdb_contents(self, pdb_id):
        '''Return the raw PDB file contents, fetching and caching on a miss.'''
        self.log_lookup('pdb contents {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.pdb_contents.get(pdb_id):
            if self.pdb_objects.get(pdb_id):
                # Reconstruct the file contents from the parsed object.
                self.add_pdb_contents(pdb_id, '\n'.join(self.pdb_objects[pdb_id].lines))
            elif self.cache_dir:
                self.add_pdb_contents(pdb_id, download_pdb(pdb_id, self.cache_dir, silent = True))
            else:
                self.add_pdb_contents(pdb_id, retrieve_pdb(pdb_id, silent = True))
        return self.pdb_contents[pdb_id]

    def add_pdb_object(self, pdb_id, pdb_object):
        self.add_node(self.pdb_objects, pdb_id.upper(), pdb_object)

    def get_pdb_object(self, pdb_id):
        '''Return a parsed PDB object, fetching and caching on a miss.'''
        self.log_lookup('pdb object {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.pdb_objects.get(pdb_id):
            if not self.pdb_contents.get(pdb_id):
                if self.cache_dir:
                    self.add_pdb_contents(pdb_id, download_pdb(pdb_id, self.cache_dir, silent = True))
                else:
                    self.add_pdb_contents(pdb_id, retrieve_pdb(pdb_id, silent = True))
            self.add_pdb_object(pdb_id, PDB(self.pdb_contents[pdb_id]))
        return self.pdb_objects[pdb_id]

    ######################
    # SIFTS XML files
    ######################

    def add_sifts_xml_contents(self, pdb_id, sifts_xml_contents):
        self.add_node(self.sifts_xml_contents, pdb_id.upper(), sifts_xml_contents)

    def get_sifts_xml_contents(self, pdb_id):
        '''Return the raw SIFTS XML, fetching and caching on a miss.'''
        self.log_lookup('SIFTS xml {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.sifts_xml_contents.get(pdb_id):
            if self.sifts_objects.get(pdb_id):
                self.add_sifts_xml_contents(pdb_id, self.sifts_objects[pdb_id].xml_contents)
            elif self.cache_dir:
                self.add_sifts_xml_contents(pdb_id, download_sifts_xml(pdb_id, self.cache_dir, silent = True))
            else:
                self.add_sifts_xml_contents(pdb_id, retrieve_sifts_xml(pdb_id, silent = True))
        return self.sifts_xml_contents[pdb_id]

    def add_sifts_object(self, pdb_id, sifts_object):
        self.add_node(self.sifts_objects, pdb_id.upper(), sifts_object)

    def get_sifts_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0, restrict_match_percentage_errors_to_these_uniparc_ids = None):
        '''Return a parsed SIFTS object, fetching and caching on a miss.'''
        # todo: we need to store all/important parameters for object creation and key on those as well e.g. "give me the SIFTS object with , restrict_match_percentage_errors_to_these_uniparc_ids = <some_set>"
        # otherwise, unexpected behavior may occur
        self.log_lookup('SIFTS object {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.sifts_objects.get(pdb_id):
            if not self.sifts_xml_contents.get(pdb_id):
                if self.cache_dir:
                    self.add_sifts_xml_contents(pdb_id, download_sifts_xml(pdb_id, self.cache_dir, silent = True))
                else:
                    self.add_sifts_xml_contents(pdb_id, retrieve_sifts_xml(pdb_id, silent = True))
            self.add_sifts_object(pdb_id, SIFTS.retrieve(pdb_id, cache_dir = self.cache_dir, acceptable_sequence_percentage_match = acceptable_sequence_percentage_match, bio_cache = self, restrict_match_percentage_errors_to_these_uniparc_ids = restrict_match_percentage_errors_to_these_uniparc_ids))
        return self.sifts_objects[pdb_id]

    ######################
    # PDBML files
    ######################

    def add_pdbml_contents(self, pdb_id, pdbml_contents):
        self.add_node(self.pdbml_contents, pdb_id.upper(), pdbml_contents)

    def get_pdbml_contents(self, pdb_id):
        '''Return the raw PDBML contents, fetching and caching on a miss.'''
        self.log_lookup('PDBML {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.pdbml_contents.get(pdb_id):
            if self.pdbml_objects.get(pdb_id):
                self.add_pdbml_contents(pdb_id, self.pdbml_objects[pdb_id].xml_contents)
            elif self.cache_dir:
                self.add_pdbml_contents(pdb_id, download_pdbml(pdb_id, self.cache_dir, silent = True))
            else:
                self.add_pdbml_contents(pdb_id, retrieve_pdbml(pdb_id, silent = True))
        return self.pdbml_contents[pdb_id]

    def add_pdbml_object(self, pdb_id, pdbml_object):
        self.add_node(self.pdbml_objects, pdb_id.upper(), pdbml_object)

    def get_pdbml_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0):
        '''Return a parsed PDBML object, fetching and caching on a miss.'''
        self.log_lookup('PDBML object {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.pdbml_objects.get(pdb_id):
            if not self.pdbml_contents.get(pdb_id):
                if self.cache_dir:
                    self.add_pdbml_contents(pdb_id, download_pdbml(pdb_id, self.cache_dir, silent = True))
                else:
                    self.add_pdbml_contents(pdb_id, retrieve_pdbml(pdb_id, silent = True))
            self.add_pdbml_object(pdb_id, PDBML.retrieve(pdb_id, cache_dir = self.cache_dir, bio_cache = self))
        return self.pdbml_objects[pdb_id]

    ######################
    # FASTA files
    ######################

    def add_fasta_contents(self, pdb_id, fasta_contents):
        self.add_node(self.fasta_contents, pdb_id.upper(), fasta_contents)

    def get_fasta_contents(self, pdb_id):
        '''Return the raw FASTA contents, fetching and caching on a miss.'''
        self.log_lookup('FASTA {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.fasta_contents.get(pdb_id):
            if self.fasta_objects.get(pdb_id):
                self.add_fasta_contents(pdb_id, self.fasta_objects[pdb_id].fasta_contents)
            elif self.cache_dir:
                self.add_fasta_contents(pdb_id, download_fasta(pdb_id, self.cache_dir, silent = True))
            else:
                self.add_fasta_contents(pdb_id, retrieve_fasta(pdb_id, silent = True))
        return self.fasta_contents[pdb_id]

    def add_fasta_object(self, pdb_id, fasta_object):
        self.add_node(self.fasta_objects, pdb_id.upper(), fasta_object)

    def get_fasta_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0):
        '''Return a parsed FASTA object, fetching and caching on a miss.'''
        self.log_lookup('FASTA object {0}'.format(pdb_id))
        pdb_id = pdb_id.upper()
        if not self.fasta_objects.get(pdb_id):
            if not self.fasta_contents.get(pdb_id):
                if self.cache_dir:
                    self.add_fasta_contents(pdb_id, download_fasta(pdb_id, self.cache_dir, silent = True))
                else:
                    self.add_fasta_contents(pdb_id, retrieve_fasta(pdb_id, silent = True))
            self.add_fasta_object(pdb_id, FASTA.retrieve(pdb_id, cache_dir = self.cache_dir, bio_cache = self))
        return self.fasta_objects[pdb_id]

    ######################
    # BLAST results
    ######################

    def _get_blast_pdb_filepath(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off):
        assert(self.cache_dir)
        return os.path.join(self.cache_dir, '{0}_{1}_{2}_{3}_{4}.BLAST.json'.format(pdb_id.upper(), chain_id, cut_off, matrix, sequence_identity_cut_off))

    def load_pdb_chain_blast(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off):
        '''Return cached BLAST hits for a PDB chain, or None on a miss.'''
        if self.cache_dir:
            filepath = self._get_blast_pdb_filepath(pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off)
            if os.path.exists(filepath):
                return json.loads(read_file(filepath))
        return None

    def save_pdb_chain_blast(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off, data):
        '''Persist BLAST hits for a PDB chain; returns True if written.'''
        if self.cache_dir:
            filepath = self._get_blast_pdb_filepath(pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off)
            write_file(filepath, json.dumps(data))
            return True
        return False

    def _get_blast_sequence_filepath(self, sequence, cut_off, matrix, sequence_identity_cut_off):
        assert(self.cache_dir)
        # 'seq_id' rather than 'id' to avoid shadowing the builtin.
        seq_id = '{0}_{1}_{2}_{3}'.format(CRC64digest(sequence), len(sequence), sequence[:5], sequence[-5:])
        return os.path.join(self.cache_dir, '{0}_{1}_{2}_{3}.BLAST.json'.format(seq_id, cut_off, matrix, sequence_identity_cut_off))

    def load_sequence_blast(self, sequence, cut_off, matrix, sequence_identity_cut_off):
        '''Return cached BLAST hits matching this exact sequence, else None.'''
        if self.cache_dir:
            filepath = self._get_blast_sequence_filepath(sequence, cut_off, matrix, sequence_identity_cut_off)
            if os.path.exists(filepath):
                for sequence_hits in json.loads(read_file(filepath)):
                    if sequence_hits['sequence'] == sequence:
                        return sequence_hits
        return None

    def save_sequence_blast(self, sequence, cut_off, matrix, sequence_identity_cut_off, data):
        '''Persist BLAST hits for a sequence, newest first; True if written.'''
        assert(data['sequence'] == sequence)
        sequence_data = [data] # put the new hit at the start of the file
        if self.cache_dir:
            filepath = self._get_blast_sequence_filepath(sequence, cut_off, matrix, sequence_identity_cut_off)
            if os.path.exists(filepath):
                for sequence_hits in json.loads(read_file(filepath)):
                    if sequence_hits['sequence'] != sequence:
                        sequence_data.append(sequence_hits)
            write_file(filepath, json.dumps(sequence_data))
            return True
        return False

    ######################
    # Static methods
    ######################

    @staticmethod
    def static_get_pdb_object(pdb_id, bio_cache = None, cache_dir = None):
        '''This method does not necessarily use a BioCache but it seems to fit here.'''
        pdb_id = pdb_id.upper()

        if bio_cache:
            return bio_cache.get_pdb_object(pdb_id)

        if cache_dir:
            # Check to see whether we have a cached copy of the PDB file
            filepath = os.path.join(cache_dir, '{0}.pdb'.format(pdb_id))
            if os.path.exists(filepath):
                return PDB.from_filepath(filepath)

        # Get any missing files from the RCSB and create cached copies if appropriate
        pdb_contents = retrieve_pdb(pdb_id)
        if cache_dir:
            write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
        return PDB(pdb_contents)
|
|
def __rope_start_everything():
    """Run a target script under sys.settrace and stream observed call/return
    type information back to rope (dynamic object inference) over a socket or
    into a file. Invoked as: runner <send_info> <project_root> <file_to_run>.
    """
    import os
    import sys
    import socket
    import pickle
    import marshal
    import inspect
    import types
    import threading

    class _MessageSender(object):
        # Interface for shipping observation records back to rope.
        def send_data(self, data):
            pass

    class _SocketSender(_MessageSender):
        # Pickles records over a local TCP connection.
        def __init__(self, port):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('127.0.0.1', port))
            self.my_file = s.makefile('wb')
        def send_data(self, data):
            if not self.my_file.closed:
                pickle.dump(data, self.my_file)
        def close(self):
            self.my_file.close()

    class _FileSender(_MessageSender):
        # Marshals records into a file for later replay.
        def __init__(self, file_name):
            self.my_file = open(file_name, 'wb')
        def send_data(self, data):
            if not self.my_file.closed:
                marshal.dump(data, self.my_file)
        def close(self):
            self.my_file.close()

    def _cached(func):
        # Memoize a one-argument method. NOTE(review): the cache is keyed on
        # the argument only, not on self — fine here because the decorated
        # methods' results do not depend on instance state.
        cache = {}
        def newfunc(self, arg):
            if arg in cache:
                return cache[arg]
            result = func(self, arg)
            cache[arg] = result
            return result
        return newfunc

    class _FunctionCallDataSender(object):
        # Installs a global trace function and persists a compact description
        # of every interesting call's code object, arguments and return value.
        def __init__(self, send_info, project_root):
            self.project_root = project_root
            if send_info.isdigit():
                # All-digit send_info is a TCP port; anything else is a path.
                self.sender = _SocketSender(int(send_info))
            else:
                self.sender = _FileSender(send_info)

            def global_trace(frame, event, arg):
                # HACK: Ignoring out->in calls
                # This might lose some information
                if self._is_an_interesting_call(frame):
                    return self.on_function_call
            sys.settrace(global_trace)
            threading.settrace(global_trace)

        def on_function_call(self, frame, event, arg):
            # Local trace function: only act when the traced frame returns.
            if event != 'return':
                return
            args = []
            returned = ('unknown',)
            code = frame.f_code
            for argname in code.co_varnames[:code.co_argcount]:
                try:
                    args.append(self._object_to_persisted_form(frame.f_locals[argname]))
                except (TypeError, AttributeError):
                    args.append(('unknown',))
            try:
                returned = self._object_to_persisted_form(arg)
            except (TypeError, AttributeError):
                pass
            try:
                data = (self._object_to_persisted_form(frame.f_code),
                        tuple(args), returned)
                self.sender.send_data(data)
            except (TypeError):
                pass
            return self.on_function_call

        def _is_an_interesting_call(self, frame):
            #if frame.f_code.co_name in ['?', '<module>']:
            #    return False
            #return not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)
            # Interesting = the frame or its caller lives inside the project.
            if not self._is_code_inside_project(frame.f_code) and \
               (not frame.f_back or not self._is_code_inside_project(frame.f_back.f_code)):
                return False
            return True

        def _is_code_inside_project(self, code):
            source = self._path(code.co_filename)
            return source is not None and os.path.exists(source) and \
                _realpath(source).startswith(self.project_root)

        @_cached
        def _get_persisted_code(self, object_):
            source = self._path(object_.co_filename)
            if not os.path.exists(source):
                raise TypeError('no source')
            return ('defined', _realpath(source), str(object_.co_firstlineno))

        @_cached
        def _get_persisted_class(self, object_):
            try:
                return ('defined', _realpath(inspect.getsourcefile(object_)),
                        object_.__name__)
            except (TypeError, AttributeError):
                return ('unknown',)

        def _get_persisted_builtin(self, object_):
            # Describe builtin containers by type plus a sampled element
            # (first element / first key-value pair), not their full contents.
            if isinstance(object_, str):
                return ('builtin', 'str')
            if isinstance(object_, list):
                holding = None
                if len(object_) > 0:
                    holding = object_[0]
                return ('builtin', 'list', self._object_to_persisted_form(holding))
            if isinstance(object_, dict):
                keys = None
                values = None
                if len(object_) > 0:
                    keys = list(object_.keys())[0]
                    values = object_[keys]
                    if values == object_ and len(object_) > 1:
                        # Avoid sampling a value that compares equal to the
                        # dict itself; pick the next key instead.
                        keys = list(object_.keys())[1]
                        values = object_[keys]
                return ('builtin', 'dict',
                        self._object_to_persisted_form(keys),
                        self._object_to_persisted_form(values))
            if isinstance(object_, tuple):
                objects = []
                if len(object_) < 3:
                    for holding in object_:
                        objects.append(self._object_to_persisted_form(holding))
                else:
                    objects.append(self._object_to_persisted_form(object_[0]))
                return tuple(['builtin', 'tuple'] + objects)
            if isinstance(object_, set):
                holding = None
                if len(object_) > 0:
                    for o in object_:
                        holding = o
                        break
                return ('builtin', 'set', self._object_to_persisted_form(holding))
            return ('unknown',)

        def _object_to_persisted_form(self, object_):
            # Map an arbitrary object to a small, picklable descriptor tuple.
            if object_ is None:
                return ('none',)
            if isinstance(object_, types.CodeType):
                return self._get_persisted_code(object_)
            if isinstance(object_, types.FunctionType):
                return self._get_persisted_code(object_.__code__)
            if isinstance(object_, types.MethodType):
                return self._get_persisted_code(object_.__func__.__code__)
            if isinstance(object_, types.ModuleType):
                return self._get_persisted_module(object_)
            if isinstance(object_, (str, list, dict, tuple, set)):
                return self._get_persisted_builtin(object_)
            if isinstance(object_, type):
                return self._get_persisted_class(object_)
            return ('instance', self._get_persisted_class(type(object_)))

        @_cached
        def _get_persisted_module(self, object_):
            path = self._path(object_.__file__)
            if path and os.path.exists(path):
                return ('defined', _realpath(path))
            return ('unknown',)

        def _path(self, path):
            # Map a .pyc path back to its .py source; returns None (falls
            # through) for anything that is not Python source.
            if path.endswith('.pyc'):
                path = path[:-1]
            if path.endswith('.py'):
                return path

        def close(self):
            self.sender.close()
            sys.settrace(None)

    def _realpath(path):
        """Return the canonical path with '~' and symlinks resolved."""
        return os.path.realpath(os.path.abspath(os.path.expanduser(path)))

    # --- script body: run the target file under the tracer ---
    send_info = sys.argv[1]
    project_root = sys.argv[2]
    file_to_run = sys.argv[3]
    run_globals = globals()
    run_globals.update({'__name__': '__main__',
                        'builtins': __builtins__,
                        '__file__': file_to_run})
    if send_info != '-':
        # '-' means "no reporting": run the script without a data sender.
        data_sender = _FunctionCallDataSender(send_info, project_root)
    del sys.argv[1:4]
    with open(file_to_run) as file:
        exec(compile(file.read(), file_to_run, 'exec'), run_globals)
    if send_info != '-':
        data_sender.close()
# This module is meant to be executed as a script by rope, not imported.
if __name__ == '__main__':
    __rope_start_everything()
|
|
#!/usr/bin/env python
import os.path
from PySide import QtCore, QtGui, QtOpenGL
from PySide.QtCore import Qt
from PySide.QtGui import QStyle, QBrush, QColor, QPen, QIcon, QPalette
from juma.core import signals, app, AssetRefType
from juma.moai.MOAIRuntime import MOAIRuntime, MOAILuaDelegate, isLuaInstance, getLuaClassName
from juma.qt.IconCache import getIcon
from juma.qt.controls.GenericTreeWidget import GenericTreeWidget, GenericTreeFilter
from juma.MainEditor.MainEditor import MainEditorModule
from juma.SearchView import requestSearchView, registerSearchEnumerator
##----------------------------------------------------------------##
def _getModulePath( path ):
return os.path.dirname( __file__ ) + '/' + path
##----------------------------------------------------------------##
class GraphEditor( MainEditorModule ):
    """Hierarchy dock panel: shows the active scene graph in a tree widget and
    exposes create/clone/remove entity commands, delegating scene mutations to
    the Lua-side editor script (GraphEditor.lua)."""
    _name = 'graph_editor'
    _dependency = [ 'qt', 'moai', 'main_editor' ]

    def __init__(self):
        super( GraphEditor, self ).__init__()
        self.delegate = None      # MOAILuaDelegate, loaded in onLoad()
        self.dirty = False        # True when the hierarchy has unsaved edits
        self.previewing = False   # while previewing, edits do not mark dirty
        self.sceneType = "scene"  # SCENE_TYPE of the currently open scene

    def onLoad( self ):
        """Build the dock window, tree and filter widgets, toolbar, menus,
        signal connections and search enumerators."""
        self.windowTitle = 'Hierarchy'
        self.window = self.requestDockWindow( 'GraphEditor',
            title = self.windowTitle,
            dock = 'left',
            size = (300,200),
            minSize = (300,200),
        )

        self.treeFilter = self.window.addWidget(
            GenericTreeFilter(
                self.window
            ),
            expanding = False
        )

        self.tree = self.window.addWidget(
            GraphTreeWidget(
                self.window,
                sorting = True,
                editable = True,
                multiple_selection = True,
                drag_mode = 'internal'
            )
        )
        self.treeFilter.setTargetTree( self.tree )
        self.tree.module = self

        self.tool = self.addToolBar( 'hierarchy', self.window.addToolBar() )

        # Load the Lua-side counterpart of this module.
        self.delegate = MOAILuaDelegate( self )
        self.delegate.load( _getModulePath( 'GraphEditor.lua' ) )

        self.addTool( 'hierarchy/create_entity', label ='Create', icon = 'plus_mint' )
        self.addTool( 'hierarchy/remove_entity', label ='Remove', icon = 'minus' )

        # MENU
        self.findMenu( 'main/entity' ).addChild([
            dict( name = 'create_entity', label = 'Create', shortcut = 'ctrl+N' ),
            # '----',
            # dict( name = 'add_component', label = 'Add component' ),
            '----',
            dict( name = 'clone_entity', label = 'Clone', shortcut = 'ctrl+D' ),
            '----',
            dict( name = 'remove_entity', label = 'Remove' ),
            '----',
        ], self )

        self.addMenu( 'component_context', dict( label = 'Selected Component' ) )
        self.addMenuItem( 'component_context/remove_component',
            dict( label = 'Remove' )
        )

        # SIGNALS
        signals.connect( 'moai.clean', self.onMoaiClean )
        signals.connect( 'selection.changed', self.onSelectionChanged )
        signals.connect( 'selection.hint', self.onSelectionHint )
        signals.connect( 'scene.change', self.onSceneChange )
        signals.connect( 'scene.settings', self.onSceneSettings )
        signals.connect( 'entity.added', self.onEntityAdded )
        signals.connect( 'entity.removed', self.onEntityRemoved )
        signals.connect( 'entity.renamed', self.onEntityRenamed )
        signals.connect( 'entity.modified', self.onEntityModified )
        signals.connect( 'entity.visible_changed', self.onEntityVisibleChanged )
        signals.connect( 'entity.pickable_changed', self.onEntityPickableChanged )
        signals.connect( 'component.added', self.onComponentAdded )
        signals.connect( 'component.removed', self.onComponentRemoved )

        # ENUMERATORS
        registerSearchEnumerator( uiNameSearchEnumerator )
        registerSearchEnumerator( entityNameSearchEnumerator )
        registerSearchEnumerator( componentNameSearchEnumerator )

    def getActiveSceneRootGroup( self ):
        """Return the Lua-side root group of the active scene, or None."""
        rootNode = self.delegate.safeCallMethod( 'editor', 'getSceneRootGroup' )
        if rootNode:
            return rootNode
        return None

    def markDirty( self, dirty = True ):
        # Previewing must not flag the scene as modified.
        if not self.previewing:
            self.dirty = dirty

    def onSceneSettings( self, scene ):
        """Select the scene object itself so its settings can be inspected."""
        self.tree.selectNode( None )
        selection = []
        if scene:
            selection.append( scene )
        self.changeSelection( selection )

    def onSceneChange(self, scene):
        """Rebuild the tree for a newly opened scene and announce it."""
        self.tree.hide()
        if scene:
            self.sceneType = scene.SCENE_TYPE
        self.delegate.safeCallMethod( 'editor', 'changeScene', scene )
        self.tree.rebuild()
        # self.restoreWorkspaceState()
        self.tree.refreshAllContent()
        self.tree.verticalScrollBar().setValue( 0 )
        self.tree.show()
        signals.emitNow( 'scene.open', scene )

    ##----------------------------------------------------------------##
    def createEntity( self ):
        """Open the search view to pick an entity type, then run the create
        command with the chosen type."""
        requestSearchView(
            info = 'select entity type to create',
            context = '{}_creation'.format(self.sceneType),
            on_selection = lambda obj:
                self.doCommand( 'main_editor/create_entity', entity = obj )
        )

    def renameEntity( self, target, name ):
        #TODO: command pattern
        target.setName( target, name )
        signals.emit( 'entity.modified', target )

    def addEntityNode( self, entity ):
        self.tree.addNode( entity, expanded = False )
        self.tree.setNodeExpanded( entity, False )

    ##----------------------------------------------------------------##
    def onMenu( self, menu ):
        """Dispatch entity/component menu actions to editor commands."""
        name = menu.name
        if name == 'create_entity':
            self.createEntity()
        elif name == 'clone_entity':
            self.doCommand( 'main_editor/clone_entity' )
        elif name == 'remove_entity':
            self.doCommand( 'main_editor/remove_entity' )
        elif name == 'remove_component':
            context = menu.getContext()
            if context:
                self.doCommand( 'main_editor/remove_component', target = context )

    def onTool( self, tool ):
        """Dispatch toolbar actions."""
        name = tool.name
        if name == 'create_entity':
            self.createEntity()
        elif name == 'remove_entity':
            self.doCommand( 'main_editor/remove_entity' )

    def onMoaiClean( self ):
        self.tree.clear()

    def onSelectionChanged( self, selection, key ):
        """Mirror an external scene-selection change into the tree."""
        if key != 'scene': return
        if self.tree.syncSelection:
            # Block signals so the programmatic selection does not echo back.
            self.tree.blockSignals( True )
            self.tree.selectNode( None )
            for e in selection:
                self.tree.selectNode( e, add = True)
            self.tree.blockSignals( False )

    def onSelectionHint( self, selection ):
        pass
        # if selection._entity:
        #     self.changeSelection( selection._entity )
        # else:
        #     self.changeSelection( selection )

    ##----------------------------------------------------------------##
    def onEntityAdded( self, entity, context = None ):
        """Focus and select a newly created entity ('new' context only),
        then refresh the scene and mark it dirty."""
        if context == 'new':
            self.setFocus()
            pnode = entity.parent
            if pnode:
                self.tree.setNodeExpanded( pnode, True )
            self.tree.setFocus()
            # self.tree.editNode( entity )
            self.tree.selectNode( entity )
        signals.emit( 'scene.update' )
        self.markDirty()

    def onEntityRemoved( self, entity ):
        if entity:
            self.tree.removeNode( entity )
        signals.emit( 'scene.update' )
        self.markDirty()

    def onEntityRenamed( self, entity, newname ):
        self.tree.refreshNodeContent( entity )
        self.markDirty()

    def onEntityModified( self, entity, context = None ):
        self.markDirty()

    def onEntityVisibleChanged( self, entity ):
        self.tree.refreshNodeContent( entity )

    def onEntityPickableChanged( self, entity ):
        self.tree.refreshNodeContent( entity )

    ##----------------------------------------------------------------##
    def onComponentAdded( self, com, entity ):
        signals.emit( 'scene.update' )
        self.markDirty()

    def onComponentRemoved( self, com, entity ):
        signals.emit( 'scene.update' )
        self.markDirty()
##----------------------------------------------------------------##
# Instantiate and register the module with the editor framework at import time.
GraphEditor().register()
##----------------------------------------------------------------##
class GraphTreeItemDelegate(QtGui.QStyledItemDelegate):
    """Custom row painter for the hierarchy tree: draws the selection/hover
    background, the decoration icon, and the label (group rows dimmed)."""
    _textBrush = QBrush( QColor( '#dd5200' ) )
    _textPen = QPen( QColor( '#dddddd' ) )
    _textPenGroup = QPen( QColor( '#ada993' ) )
    _backgroundBrushHovered = QBrush( QColor( '#454768' ) )
    _backgroundBrushSelected = QBrush( QColor( '#515c84' ) )

    def paint(self, painter, option, index):
        painter.save()
        # The row's user type lives on column 0 regardless of which column
        # is being painted.
        first_column = index.sibling( index.row(), 0 )
        user_type = first_column.data( Qt.UserRole )

        # Background: selection takes precedence over hover.
        background = None
        if option.state & QStyle.State_Selected:
            background = GraphTreeItemDelegate._backgroundBrushSelected
        elif option.state & QStyle.State_MouseOver:
            background = GraphTreeItemDelegate._backgroundBrushHovered
        if background is not None:
            painter.setPen( Qt.NoPen )
            painter.setBrush( background )
            painter.drawRect( option.rect )

        # Icon first, then the text shifted to its right.
        cell_rect = option.rect
        cell_rect.adjust( 5, 0, 0, 0 )
        decoration = QIcon( index.data( Qt.DecorationRole ) )
        if decoration and not decoration.isNull():
            decoration.paint( painter, cell_rect, Qt.AlignLeft )
            cell_rect.adjust( 22, 0, 0, 0 )

        # user_type == 1 marks a GROUP row, drawn with the dimmer pen.
        if user_type == 1:
            painter.setPen( GraphTreeItemDelegate._textPenGroup )
        else:
            painter.setPen( GraphTreeItemDelegate._textPen )
        painter.drawText( cell_rect, Qt.AlignLeft | Qt.AlignVCenter,
                          index.data( Qt.DisplayRole ) )
        painter.restore()
class ReadonlyGraphTreeItemDelegate( GraphTreeItemDelegate ):
    # Variant used for read-only columns: returning None from createEditor
    # suppresses inline editing entirely.
    def createEditor( *args ):
        return None
##----------------------------------------------------------------##
class GraphTreeWidget( GenericTreeWidget ):
def __init__( self, *args, **kwargs ):
super( GraphTreeWidget, self ).__init__( *args, **kwargs )
self.syncSelection = True
# self.adjustingRange = False
# self.verticalScrollBar().rangeChanged.connect( self.onScrollRangeChanged )
self.setIndentation( 13 )
def getHeaderInfo( self ):
return [('Name',160), ('V',32 ), ('L',32 ), ('I',32 ), ('', -1) ] #( 'Layer', 50 ), ('', -1) ]
def getReadonlyItemDelegate( self ):
return ReadonlyGraphTreeItemDelegate( self )
def getDefaultItemDelegate( self ):
return GraphTreeItemDelegate( self )
def getRootNode( self ):
return self.module.getActiveSceneRootGroup()
def getNodeParent( self, node ):
p = node.getParent( node )
if p:
return p
return None
def getNodeChildren( self, node ):
output = []
children = node.children
if children:
for index in children:
output.append( children[index] )
return output
# FIXME todo:reparent move to Lua
def reparentNode( self, node, pitem, **option ):
mode = option.get("mode", None)
if node and pitem:
pnode = None
if pitem == 'root':
pnode = self.getRootNode()
else:
if mode:
pnode = pitem.node.parent # subling above below
else:
pnode = pitem.node
if pnode:
node.detach( node )
if mode:
itemMode = None
item = self.getItemByNode(node)
indexPaste = 0
if mode == "above":
itemMode = self.itemAbove(item)
indexPaste = 1
elif mode == "below":
itemMode = self.itemBelow(item)
indexPaste = -1
nodeMode = itemMode.node
nodeIndex = pnode.getChildIndex( pnode, nodeMode )
pnode.addChild( pnode, node, nodeIndex + indexPaste )
else:
pnode.addChild( pnode, node )
return True
return False
def updateItemContent( self, item, node, **option ):
name = None
item.setData( 0, Qt.UserRole, 0 )
if node and (node is not None):
className = node.className( node )
if isLuaInstance( node, 'Prefab' ):
item.setText( 0, node.name or '<prefab>' )
item.setIcon( 0, getIcon('prefab') )
else:
item.setText( 0, node.name or '<widget>' )
item.setIcon( 0, getIcon('dot') )
item.setIcon( 1, getIcon('entity_view') )
item.setIcon( 2, getIcon('entity_dot') )
def getItemFlags( self, node ):
flagNames = {}
return flagNames
##----------------------------------------------------------------##
def dropEvent( self, event ):
p = self.dropIndicatorPosition()
pos = False
if p == QtGui.QAbstractItemView.OnItem: # reparent
pos = 'on'
elif p == QtGui.QAbstractItemView.AboveItem:
pos = 'above'
elif p == QtGui.QAbstractItemView.BelowItem:
pos = 'below'
else:
pos = 'viewport'
target = self.itemAt( event.pos() )
items = self.module.getSelection()
ok = False
for item in items:
print("dropEvent pos: {} ::{} {} node:: ".format(pos, item, target))
if pos == 'on':
ok = self.reparentNode( item, target )
# ok = self.module.doCommand( 'scene_editor/reparent_entity', target = target.node )
elif pos == 'viewport':
ok = self.reparentNode( item, 'root' )
# ok = self.module.doCommand( 'scene_editor/reparent_entity', target = 'root' )
elif pos == 'above' or pos == 'below':
ok = self.reparentNode( item, target, mode = pos )
# ok = self.module.doCommand( 'scene_editor/reparent_entity', target = target.node, mode = 'sibling' )
if ok:
super( GenericTreeWidget, self ).dropEvent( event )
else:
event.setDropAction( Qt.IgnoreAction )
##----------------------------------------------------------------##
# Event Callback
##----------------------------------------------------------------##
    def onClicked(self, item, col):
        """Debug hook: log a single click on *item* at column *col*."""
        print("onClicked", item, col)
    def onDClicked(self, item, col):
        """Debug hook: log a double click on *item* at column *col*."""
        print("onDClicked", item, col)
def onItemSelectionChanged(self):
if not self.syncSelection: return
items = self.selectedItems()
if items:
selections=[item.node for item in items]
self.module.changeSelection(selections)
else:
self.module.changeSelection(None)
    def onItemActivated(self, item, col):
        """Debug hook: log activation (Enter/double-click) of *item*."""
        print("onItemActivated", item, col)
    def onItemExpanded( self, item ):
        """Debug hook: log expansion of a tree *item*."""
        print("onItemExpanded", item)
    def onItemCollapsed( self, item ):
        """Debug hook: log collapse of a tree *item*."""
        print("onItemCollapsed", item)
    def onClipboardCopy( self ):
        """Debug stub: clipboard copy is not implemented yet."""
        print("onClipboardCopy")
    def onClipboardCut( self ):
        """Debug stub: clipboard cut is not implemented yet."""
        print("onClipboardCut")
    def onClipboardPaste( self ):
        """Debug stub: clipboard paste is not implemented yet."""
        print("onClipboardPaste")
    def onItemChanged( self, item, col ):
        """Commit an in-place label edit by renaming the backing entity."""
        self.module.renameEntity( item.node, item.text(0) )
    def onDeletePressed( self ):
        """Delete the current entity, then move focus to the next item below.

        Selection syncing is suspended for the duration so the removal does
        not emit a spurious selection-change notification mid-operation; a
        single sync is performed explicitly at the end.
        """
        self.syncSelection = False
        item0 = self.currentItem()
        # Capture the next item BEFORE removal so we can refocus afterwards.
        item1 = self.itemBelow( item0 )
        self.module.doCommand( 'main_editor/remove_entity' )
        if item1:
            self.setFocusedItem( item1 )
        self.syncSelection = True
        self.onItemSelectionChanged()
##----------------------------------------------------------------##
def uiNameSearchEnumerator( typeId, context, option ):
    """Enumerate Lua classes registered under "ui" for the search widget.

    Returns a sorted list of (name, name, 'UI', None) entries, or None when
    *context* is not a UI-creation search.
    """
    # Idiom fix: `context not in (...)` instead of `not context in [...]`.
    if context not in ( 'ui_creation', ):
        return None
    registry = MOAIRuntime.get().getLuaClassRegistry( "ui" )
    return [ ( name, name, 'UI', None ) for name in sorted( registry ) ]
def entityNameSearchEnumerator( typeId, context, option ):
    """Enumerate Lua classes registered under "entity" for the search widget.

    Returns a sorted list of (name, name, 'Entity', None) entries, or None
    when *context* is not a scene-creation search.
    """
    if context not in ( 'scene_creation', ):
        return None
    registry = MOAIRuntime.get().getLuaClassRegistry( "entity" )
    return [ ( name, name, 'Entity', None ) for name in sorted( registry ) ]
def componentNameSearchEnumerator( typeId, context, option ):
    """Enumerate Lua classes registered under "component" for the search widget.

    Returns a sorted list of (name, name, 'Entity', None) entries, or None
    when *context* is not a component-creation search.
    """
    if context not in ( 'component_creation', ):
        return None
    registry = MOAIRuntime.get().getLuaClassRegistry( "component" )
    # NOTE(review): the category label is 'Entity' even though these are
    # components; preserved as-is — confirm whether 'Component' was intended.
    return [ ( name, name, 'Entity', None ) for name in sorted( registry ) ]
|
|
import time
import random
from os import path
import subprocess
import pytest
import cattle
# TCP endpoints of the two replica processes the test harness launches on
# fixed local ports; passed to the longhorn CLI as replica addresses.
REPLICA = 'tcp://localhost:9502'
REPLICA2 = 'tcp://localhost:9505'
@pytest.fixture
def controller_client(request):
    """Yield a controller API client with all replicas removed.

    Also registers a finalizer so the controller is cleaned again after
    the test finishes.
    """
    url = 'http://localhost:9501/v1/schemas'
    c = cattle.from_env(url=url)
    request.addfinalizer(lambda: cleanup_controller(c))
    c = cleanup_controller(c)
    # A clean controller must report zero attached replicas.
    assert c.list_volume()[0].replicaCount == 0
    return c
def cleanup_controller(client):
    """Detach every replica registered with the controller and return *client*."""
    for replica in client.list_replica():
        client.delete(replica)
    return client
@pytest.fixture
def replica_client(request):
    """Yield an API client for the first replica, reset to 'initial' state."""
    url = 'http://localhost:9502/v1/schemas'
    c = cattle.from_env(url=url)
    request.addfinalizer(lambda: cleanup_replica(c))
    return cleanup_replica(c)
@pytest.fixture
def replica_client2(request):
    """Yield an API client for the second replica, reset to 'initial' state."""
    url = 'http://localhost:9505/v1/schemas'
    c = cattle.from_env(url=url)
    request.addfinalizer(lambda: cleanup_replica(c))
    return cleanup_replica(c)
def cleanup_replica(client):
    """Return *client*'s single replica to the 'initial' state.

    A replica already in 'initial' state is left untouched; otherwise it is
    opened if the resource exposes an 'open' action, deleted, reloaded, and
    verified to be back in the 'initial' state.
    """
    replica = client.list_replica()[0]
    if replica.state == 'initial':
        return client
    # cattle resources expose available actions as members, hence the
    # membership test before calling open().
    if 'open' in replica:
        replica = replica.open()
    client.delete(replica)
    replica = client.reload(replica)
    assert replica.state == 'initial'
    return client
@pytest.fixture
def random_str():
    """Return a unique-ish name combining a random number and the current time."""
    return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
    """Return a random integer in the inclusive range [0, 1000000]."""
    upper_bound = 1000000
    return random.randint(0, upper_bound)
def _file(f):
    """Resolve *f* relative to the repository root (two levels above this dir)."""
    relative = '../../{}'.format(f)
    return path.join(_base(), relative)
def _base():
return path.dirname(__file__)
@pytest.fixture(scope='session')
def bin():
    """Return the path to the built longhorn binary, asserting it exists.

    NOTE: the fixture name shadows the `bin` builtin, but it is part of the
    public fixture interface and so is kept.
    """
    c = _file('bin/longhorn')
    assert path.exists(c)
    return c
def open_replica(client):
    """Create the client's single replica and return the created resource.

    Verifies the replica starts empty and uninitialized, then creates it at a
    fixed size and verifies the resulting closed state and fresh head image.
    """
    replicas = client.list_replica()
    assert len(replicas) == 1
    replica = replicas[0]
    # A fresh replica must be completely blank.
    assert replica.state == 'initial'
    assert replica.size == '0'
    assert replica.sectorSize == 0
    assert replica.parent == ''
    assert replica.head == ''
    expected_size = str(1024 * 4096)
    replica = replica.create(size=expected_size)
    # After creation the replica is closed with a fresh head image.
    assert replica.state == 'closed'
    assert replica.size == expected_size
    assert replica.sectorSize == 512
    assert replica.parent == ''
    assert replica.head == 'volume-head-000.img'
    return replica
def test_replica_add_start(bin, controller_client, replica_client):
    """Adding a single opened replica registers it with the controller."""
    open_replica(replica_client)
    cmd = [bin, '--debug', 'add-replica', REPLICA]
    subprocess.check_call(cmd)
    volume = controller_client.list_volume()[0]
    assert volume.replicaCount == 1
def test_replica_add_rebuild(bin, controller_client, replica_client,
                             replica_client2):
    """A second replica with a diverged chain is rebuilt to match on add."""
    open_replica(replica_client)
    open_replica(replica_client2)
    # Put two snapshots on the first replica only, so the chains diverge.
    r = replica_client.list_replica()[0]
    r = r.open()
    r = r.snapshot(name='000')
    r = r.snapshot(name='001')
    l = replica_client2.list_replica()[0]
    assert r.chain == ['volume-head-002.img',
                       'volume-snap-001.img',
                       'volume-snap-000.img']
    assert l.chain != ['volume-head-002.img',
                       'volume-snap-001.img',
                       'volume-snap-000.img']
    r = r.close()
    cmd = [bin, '--debug', 'add-replica', REPLICA]
    subprocess.check_call(cmd)
    volume = controller_client.list_volume()[0]
    assert volume.replicaCount == 1
    # Adding the diverged replica triggers a rebuild against the first one.
    cmd = [bin, '--debug', 'add-replica', REPLICA2]
    subprocess.check_call(cmd)
    volume = controller_client.list_volume()[0]
    assert volume.replicaCount == 2
    # Both replicas must come back read-write once the rebuild completes.
    replicas = controller_client.list_replica()
    assert len(replicas) == 2
    for r in replicas:
        assert r.mode == 'RW'
def test_replica_add_after_rebuild_failed(bin, controller_client,
                                          replica_client, replica_client2):
    """A replica left flagged as 'rebuilding' can still be re-added."""
    open_replica(replica_client)
    open_replica(replica_client2)
    r = replica_client.list_replica()[0]
    r = r.open()
    r = r.snapshot(name='000')
    r.close()
    cmd = [bin, '--debug', 'add-replica', REPLICA]
    subprocess.check_call(cmd)
    volume = controller_client.list_volume()[0]
    assert volume.replicaCount == 1
    # Simulate a crashed rebuild by leaving the second replica flagged.
    l = replica_client2.list_replica()[0]
    l = l.open()
    l = l.setrebuilding(rebuilding=True)
    l.close()
    cmd = [bin, '--debug', 'add-replica', REPLICA2]
    subprocess.check_call(cmd)
    volume = controller_client.list_volume()[0]
    assert volume.replicaCount == 2
    replicas = controller_client.list_replica()
    assert len(replicas) == 2
    for r in replicas:
        assert r.mode == 'RW'
def test_snapshot(bin, controller_client, replica_client, replica_client2):
    """`longhorn snapshot` lists snapshot IDs, newest first."""
    open_replica(replica_client)
    open_replica(replica_client2)
    v = controller_client.list_volume()[0]
    v = v.start(replicas=[
        REPLICA,
        REPLICA2,
    ])
    assert v.replicaCount == 2
    snap = v.snapshot()
    assert snap.id != ''
    snap2 = v.snapshot()
    assert snap2.id != ''
    cmd = [bin, '--debug', 'snapshot']
    output = subprocess.check_output(cmd)
    # Header line, then snapshots in reverse-chronological order.
    assert output == '''ID
{}
{}
'''.format(snap2.id, snap.id)
def test_snapshot_ls(bin, controller_client, replica_client, replica_client2):
    """`longhorn snapshot ls` matches the bare `snapshot` listing."""
    open_replica(replica_client)
    open_replica(replica_client2)
    v = controller_client.list_volume()[0]
    v = v.start(replicas=[
        REPLICA,
        REPLICA2,
    ])
    assert v.replicaCount == 2
    snap = v.snapshot()
    assert snap.id != ''
    snap2 = v.snapshot()
    assert snap2.id != ''
    cmd = [bin, '--debug', 'snapshot', 'ls']
    output = subprocess.check_output(cmd)
    # Header line, then snapshots in reverse-chronological order.
    assert output == '''ID
{}
{}
'''.format(snap2.id, snap.id)
def test_snapshot_create(bin, controller_client, replica_client,
                         replica_client2):
    """`snapshot create` prints the new ID and inserts it into the chain."""
    open_replica(replica_client)
    open_replica(replica_client2)
    v = controller_client.list_volume()[0]
    v = v.start(replicas=[
        REPLICA,
        REPLICA2,
    ])
    assert v.replicaCount == 2
    cmd = [bin, 'snapshot', 'create']
    output = subprocess.check_output(cmd).strip()
    # The printed ID must correspond to the image directly under the head.
    expected = replica_client.list_replica()[0].chain[1]
    assert expected == 'volume-snap-{}.img'.format(output)
    cmd = [bin, '--debug', 'snapshot', 'ls']
    ls_output = subprocess.check_output(cmd)
    assert ls_output == '''ID
{}
'''.format(output)
def test_snapshot_rm(bin, controller_client, replica_client, replica_client2):
    """`snapshot rm` collapses the removed snapshot out of the chain."""
    open_replica(replica_client)
    open_replica(replica_client2)
    v = controller_client.list_volume()[0]
    v = v.start(replicas=[
        REPLICA,
        REPLICA2,
    ])
    assert v.replicaCount == 2
    cmd = [bin, 'snapshot', 'create']
    subprocess.check_call(cmd)
    output = subprocess.check_output(cmd).strip()
    chain = replica_client.list_replica()[0].chain
    assert len(chain) == 3
    assert chain[0] == 'volume-head-002.img'
    assert chain[1] == 'volume-snap-{}.img'.format(output)
    cmd = [bin, 'snapshot', 'rm', output]
    subprocess.check_call(cmd)
    new_chain = replica_client.list_replica()[0].chain
    assert len(new_chain) == 2
    # Head stays; the removed snapshot's parent takes its place.
    assert chain[0] == new_chain[0]
    assert chain[2] == new_chain[1]
def test_snapshot_last(bin, controller_client, replica_client,
                       replica_client2):
    """The snapshot implicitly taken when adding a replica cannot be removed."""
    open_replica(replica_client)
    open_replica(replica_client2)
    v = controller_client.list_volume()[0]
    v = v.start(replicas=[
        REPLICA,
    ])
    assert v.replicaCount == 1
    # Adding the second replica snapshots the volume as the rebuild base.
    cmd = [bin, 'add', REPLICA2]
    subprocess.check_output(cmd)
    output = subprocess.check_output([bin, 'snapshot', 'ls'])
    output = output.splitlines()[1]
    chain = replica_client.list_replica()[0].chain
    assert len(chain) == 2
    assert chain[0] == 'volume-head-001.img'
    assert chain[1] == 'volume-snap-{}.img'.format(output)
    chain = replica_client2.list_replica()[0].chain
    assert len(chain) == 2
    assert chain[0] == 'volume-head-001.img'
    assert chain[1] == 'volume-snap-{}.img'.format(output)
    # Deleting the only snapshot (the rebuild base) must be rejected.
    cmd = [bin, 'snapshot', 'rm', output]
    with pytest.raises(subprocess.CalledProcessError):
        subprocess.check_call(cmd)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import (
export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config,
)
# Proto-plus module descriptor: declares the proto package and the message
# types (manifest) this generated module defines.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition",
    manifest={"AutoMlTables", "AutoMlTablesInputs", "AutoMlTablesMetadata",},
)
class AutoMlTables(proto.Message):
    r"""A TrainingJob that trains and uploads an AutoML Tables Model.
    Attributes:
        inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs):
            The input parameters of this TrainingJob.
        metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesMetadata):
            The metadata information.
    """

    # Generated field descriptors; the `number=` values define the wire
    # format and must never be changed.
    inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTablesInputs",)
    metadata = proto.Field(proto.MESSAGE, number=2, message="AutoMlTablesMetadata",)
class AutoMlTablesInputs(proto.Message):
    r"""
    Attributes:
        optimization_objective_recall_value (float):
            Required when optimization_objective is
            "maximize-precision-at-recall". Must be between 0 and 1,
            inclusive.
        optimization_objective_precision_value (float):
            Required when optimization_objective is
            "maximize-recall-at-precision". Must be between 0 and 1,
            inclusive.
        prediction_type (str):
            The type of prediction the Model is to
            produce. "classification" - Predict one out of
            multiple target values is
            picked for each row.
            "regression" - Predict a value based on its
            relation to other values. This
            type is available only to columns that contain
            semantically numeric values, i.e. integers or
            floating point number, even if
            stored as e.g. strings.
        target_column (str):
            The column name of the target column that the
            model is to predict.
        transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation]):
            Each transformation will apply transform
            function to given input column. And the result
            will be used for training. When creating
            transformation for BigQuery Struct column, the
            column should be flattened using "." as the
            delimiter.
        optimization_objective (str):
            Objective function the model is optimizing
            towards. The training process creates a model
            that maximizes/minimizes the value of the
            objective function over the validation set.
            The supported optimization objectives depend on
            the prediction type. If the field is not set, a
            default objective function is used.
            classification (binary):
            "maximize-au-roc" (default) - Maximize the
            area under the receiver
            operating characteristic (ROC) curve.
            "minimize-log-loss" - Minimize log loss.
            "maximize-au-prc" - Maximize the area under
            the precision-recall curve. "maximize-
            precision-at-recall" - Maximize precision for a
            specified
            recall value. "maximize-recall-at-precision" -
            Maximize recall for a specified
            precision value.
            classification (multi-class):
            "minimize-log-loss" (default) - Minimize log
            loss.
            regression:
            "minimize-rmse" (default) - Minimize root-
            mean-squared error (RMSE). "minimize-mae" -
            Minimize mean-absolute error (MAE). "minimize-
            rmsle" - Minimize root-mean-squared log error
            (RMSLE).
        train_budget_milli_node_hours (int):
            Required. The train budget of creating this
            model, expressed in milli node hours i.e. 1,000
            value in this field means 1 node hour.
            The training cost of the model will not exceed
            this budget. The final cost will be attempted to
            be close to the budget, though may end up being
            (even) noticeably smaller - at the backend's
            discretion. This especially may happen when
            further model training ceases to provide any
            improvements.
            If the budget is set to a value known to be
            insufficient to train a model for the given
            dataset, the training won't be attempted and
            will error.
            The train budget must be between 1,000 and
            72,000 milli node hours, inclusive.
        disable_early_stopping (bool):
            Use the entire training budget. This disables
            the early stopping feature. By default, the
            early stopping feature is enabled, which means
            that AutoML Tables might stop training before
            the entire training budget has been used.
        weight_column_name (str):
            Column name that should be used as the weight
            column. Higher values in this column give more
            importance to the row during model training. The
            column must have numeric values between 0 and
            10000 inclusively; 0 means the row is ignored
            for training. If weight column field is not set,
            then all rows are assumed to have equal weight
            of 1.
        export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig):
            Configuration for exporting test set
            predictions to a BigQuery table. If this
            configuration is absent, then the export is not
            performed.
        additional_experiments (Sequence[str]):
            Additional experiment flags for the Tables
            training pipeline.
    """

    class Transformation(proto.Message):
        r"""
        Attributes:
            auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
            numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
            categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
            timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
            text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextTransformation):
            repeated_numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
            repeated_categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
            repeated_text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
        """

        class AutoTransformation(proto.Message):
            r"""Training pipeline will infer the proper transformation based
            on the statistic of dataset.
            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1,)

        class NumericTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.
            - The value converted to float32.
            - The z_score of the value.
            - log(value+1) when the value is greater than or equal to 0.
            Otherwise, this transformation is not applied and the value is
            considered a missing value.
            - z_score of log(value+1) when the value is greater than or equal
            to 0. Otherwise, this transformation is not applied and the value
            is considered a missing value.
            - A boolean value that indicates whether the value is valid.
            Attributes:
                column_name (str):
                invalid_values_allowed (bool):
                    If invalid values is allowed, the training
                    pipeline will create a boolean feature that
                    indicated whether the value is valid. Otherwise,
                    the training pipeline will discard the input row
                    from training data.
            """

            column_name = proto.Field(proto.STRING, number=1,)
            invalid_values_allowed = proto.Field(proto.BOOL, number=2,)

        class CategoricalTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.
            - The categorical string as is--no change to case, punctuation,
            spelling, tense, and so on.
            - Convert the category name to a dictionary lookup index and
            generate an embedding for each index.
            - Categories that appear less than 5 times in the training dataset
            are treated as the "unknown" category. The "unknown" category
            gets its own special lookup index and resulting embedding.
            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1,)

        class TimestampTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.
            - Apply the transformation functions for Numerical columns.
            - Determine the year, month, day,and weekday. Treat each value from
            the
            - timestamp as a Categorical column.
            - Invalid numerical values (for example, values that fall outside
            of a typical timestamp range, or are extreme values) receive no
            special treatment and are not removed.
            Attributes:
                column_name (str):
                time_format (str):
                    The format in which that time field is expressed. The
                    time_format must either be one of:
                    - ``unix-seconds``
                    - ``unix-milliseconds``
                    - ``unix-microseconds``
                    - ``unix-nanoseconds`` (for respectively number of seconds,
                    milliseconds, microseconds and nanoseconds since start of
                    the Unix epoch); or be written in ``strftime`` syntax. If
                    time_format is not set, then the default format is RFC
                    3339 ``date-time`` format, where ``time-offset`` =
                    ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
                invalid_values_allowed (bool):
                    If invalid values is allowed, the training
                    pipeline will create a boolean feature that
                    indicated whether the value is valid. Otherwise,
                    the training pipeline will discard the input row
                    from training data.
            """

            column_name = proto.Field(proto.STRING, number=1,)
            time_format = proto.Field(proto.STRING, number=2,)
            invalid_values_allowed = proto.Field(proto.BOOL, number=3,)

        class TextTransformation(proto.Message):
            r"""Training pipeline will perform following transformation functions.
            - The text as is--no change to case, punctuation, spelling, tense,
            and so on.
            - Tokenize text to words. Convert each words to a dictionary lookup
            index and generate an embedding for each index. Combine the
            embedding of all elements into a single embedding using the mean.
            - Tokenization is based on unicode script boundaries.
            - Missing values get their own lookup index and resulting
            embedding.
            - Stop-words receive no special treatment and are not removed.
            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1,)

        class NumericArrayTransformation(proto.Message):
            r"""Treats the column as numerical array and performs following
            transformation functions.
            - All transformations for Numerical types applied to the average of
            the all elements.
            - The average of empty arrays is treated as zero.
            Attributes:
                column_name (str):
                invalid_values_allowed (bool):
                    If invalid values is allowed, the training
                    pipeline will create a boolean feature that
                    indicated whether the value is valid. Otherwise,
                    the training pipeline will discard the input row
                    from training data.
            """

            column_name = proto.Field(proto.STRING, number=1,)
            invalid_values_allowed = proto.Field(proto.BOOL, number=2,)

        class CategoricalArrayTransformation(proto.Message):
            r"""Treats the column as categorical array and performs following
            transformation functions.
            - For each element in the array, convert the category name to a
            dictionary lookup index and generate an embedding for each index.
            Combine the embedding of all elements into a single embedding
            using the mean.
            - Empty arrays treated as an embedding of zeroes.
            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1,)

        class TextArrayTransformation(proto.Message):
            r"""Treats the column as text array and performs following
            transformation functions.
            - Concatenate all text values in the array into a single text value
            using a space (" ") as a delimiter, and then treat the result as
            a single text value. Apply the transformations for Text columns.
            - Empty arrays treated as an empty text.
            Attributes:
                column_name (str):
            """

            column_name = proto.Field(proto.STRING, number=1,)

        # Exactly one of the following may be set (proto `oneof`); the
        # `number=` values define the wire format and must not change.
        auto = proto.Field(
            proto.MESSAGE,
            number=1,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.AutoTransformation",
        )
        numeric = proto.Field(
            proto.MESSAGE,
            number=2,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.NumericTransformation",
        )
        categorical = proto.Field(
            proto.MESSAGE,
            number=3,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.CategoricalTransformation",
        )
        timestamp = proto.Field(
            proto.MESSAGE,
            number=4,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.TimestampTransformation",
        )
        text = proto.Field(
            proto.MESSAGE,
            number=5,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.TextTransformation",
        )
        repeated_numeric = proto.Field(
            proto.MESSAGE,
            number=6,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.NumericArrayTransformation",
        )
        repeated_categorical = proto.Field(
            proto.MESSAGE,
            number=7,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.CategoricalArrayTransformation",
        )
        repeated_text = proto.Field(
            proto.MESSAGE,
            number=8,
            oneof="transformation_detail",
            message="AutoMlTablesInputs.Transformation.TextArrayTransformation",
        )

    # The two objective thresholds are mutually exclusive (proto `oneof`).
    optimization_objective_recall_value = proto.Field(
        proto.FLOAT, number=5, oneof="additional_optimization_objective_config",
    )
    optimization_objective_precision_value = proto.Field(
        proto.FLOAT, number=6, oneof="additional_optimization_objective_config",
    )
    prediction_type = proto.Field(proto.STRING, number=1,)
    target_column = proto.Field(proto.STRING, number=2,)
    transformations = proto.RepeatedField(
        proto.MESSAGE, number=3, message=Transformation,
    )
    optimization_objective = proto.Field(proto.STRING, number=4,)
    train_budget_milli_node_hours = proto.Field(proto.INT64, number=7,)
    disable_early_stopping = proto.Field(proto.BOOL, number=8,)
    weight_column_name = proto.Field(proto.STRING, number=9,)
    export_evaluated_data_items_config = proto.Field(
        proto.MESSAGE,
        number=10,
        message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
    )
    additional_experiments = proto.RepeatedField(proto.STRING, number=11,)
class AutoMlTablesMetadata(proto.Message):
    r"""Model metadata specific to AutoML Tables.
    Attributes:
        train_cost_milli_node_hours (int):
            Output only. The actual training cost of the
            model, expressed in milli node hours, i.e. 1,000
            value in this field means 1 node hour.
            Guaranteed to not exceed the train budget.
    """

    # Wire-format field number 1; must never be renumbered.
    train_cost_milli_node_hours = proto.Field(proto.INT64, number=1,)
# Public names exported by this module, derived from the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
from __future__ import unicode_literals
from django.db.models.functions import Cast
# Models
from django.db.models import (
IntegerField,
Case,
Q,
When
)
from .elections import OCDElectionProxy
from opencivicdata.core.models import Membership
from opencivicdata.elections.models import Candidacy, CandidacySource
from calaccess_processed.proxies import OCDProxyModelMixin
from ..core.people import OCDPersonProxy
# Managers
from calaccess_processed.managers import BulkLoadSQLManager
from calaccess_processed_elections.managers import OCDCandidacyManager
class OCDCandidacyProxy(Candidacy, OCDProxyModelMixin):
    """
    A proxy on the OCD Candidacy model with helper methods.
    """
    objects = OCDCandidacyManager()

    # Columns written out during bulk COPY exports (one tuple per column).
    copy_to_fields = (
        ('id',),
        ('candidate_name',),
        ('person',),
        ('party',),
        ('contest',),
        ('post',),
        ('is_incumbent',),
        ('registration_status',),
        ('top_ticket_candidacy',),
        ('filed_date',),
        ('created_at',),
        ('updated_at',),
        ('extras',),
        ('locked_fields',),
    )

    class Meta:
        """
        Make this a proxy model.
        """
        app_label = "calaccess_processed_elections"
        proxy = True

    @property
    def election_proxy(self):
        """
        Returns the proxied OCDElectionProxy linked to this candidacy.
        """
        return OCDElectionProxy.objects.get(id=self.contest.election_id)

    @property
    def filer_ids(self):
        """
        Returns the CAL-ACCESS filer_id linked with the object, if any.
        """
        return self.person.identifiers.filter(scheme="calaccess_filer_id")

    @property
    def form501_filing_ids(self):
        """
        Returns any linked Form 501 filing ids.
        """
        # The ids live in the free-form `extras` JSON; absent key means none.
        try:
            return self.extras['form501_filing_ids']
        except KeyError:
            return []

    @property
    def form501s(self):
        """
        Returns any linked Form 501 objects.
        """
        from calaccess_processed_filings.models import Form501Filing
        return Form501Filing.objects.filter(filing_id__in=self.form501_filing_ids)

    @property
    def person_proxy(self):
        """
        Returns an OCDPersonProxy instance linked to the Candidacy.
        """
        # Re-class the already-loaded instance rather than re-querying.
        person = self.person
        person.__class__ = OCDPersonProxy
        return person

    def link_form501(self, form501_id):
        """
        Link an id of a Form501Filing to a Candidacy, if it isn't already.
        """
        # Check if the attribute is already there
        if 'form501_filing_ids' in self.extras:
            # If it is, check if we already have this id
            if form501_id not in self.extras['form501_filing_ids']:
                # If we don't, append it to the list
                self.extras['form501_filing_ids'].append(form501_id)
                # Save out
                self.save()
        # If the attribute isn't there, go ahead and add it.
        else:
            self.extras['form501_filing_ids'] = [form501_id]
            # Save out
            self.save()

    def update_from_form501(self):
        """
        Set Candidacy fields using data extracted from linked Form501Filings.

        NOTE: assumes 'form501_filing_ids' is present in `extras`
        (raises KeyError otherwise) — callers link filings first.
        """
        from calaccess_processed_filings.models import Form501Filing
        # get all Form501Filing linked to Candidacy
        filing_ids = self.extras['form501_filing_ids']
        filings = Form501Filing.objects.filter(filing_id__in=filing_ids)
        # keep the earliest filed_date
        first_filed_date = filings.earliest('date_filed').date_filed
        # Update filed_date if not the earliest
        if self.filed_date != first_filed_date:
            self.filed_date = first_filed_date
            self.save()
        # set registration status to "withdrawn" based on statement_type of latest Form501
        latest = filings.latest('date_filed')
        if latest.statement_type == '10003':  # <-- This is the code for withdrawn
            # If the candidacy hasn't been marked that way, update it now
            if self.registration_status != 'withdrawn':
                self.registration_status = 'withdrawn'
                self.save()

    def link_filer_ids_from_form501s(self):
        """
        Create PersonIdentifiers for each filer_id from Form501Filings.
        """
        from calaccess_processed_filings.models import Form501Filing
        person = self.person
        current_filer_ids = [
            i.identifier for i in person.identifiers.filter(scheme='calaccess_filer_id')
        ]
        filing_ids = self.extras['form501_filing_ids']
        # Filer ids on linked filings that the person doesn't have yet.
        missing_filer_ids = [
            f.filer_id for f in Form501Filing.objects.filter(
                filing_id__in=filing_ids
            ).exclude(
                filer_id__in=current_filer_ids
            )
        ]
        for i in missing_filer_ids:
            person.identifiers.get_or_create(
                scheme='calaccess_filer_id',
                identifier=i,
            )

    def update_party_from_form501(self):
        """
        Update party for Candidacy based on latest Form501 where its populated.
        """
        from calaccess_processed_filings.models import Form501Filing
        # get all Form501Filing linked to Candidacy
        filing_ids = self.extras['form501_filing_ids']
        filings = Form501Filing.objects.filter(filing_id__in=filing_ids)
        # NOTE(review): .latest() raises DoesNotExist when no filing has a
        # party set — confirm callers guarantee at least one.
        latest_party = filings.filter(
            party__isnull=False
        ).latest('date_filed').get_party()
        if latest_party != self.party:
            self.party = latest_party
            self.save()

    def check_incumbency(self):
        """
        Check if the Candidacy is for the incumbent officeholder.
        Return True if:
        * Membership exists for the Person and Post linked to the Candidacy, and
        * Membership.end_date is NULL or has a year later than Election.date.year.
        """
        # NOTE(review): `self.election` is not a visible Candidacy field here
        # (the election is normally reached via self.contest) — confirm an
        # `election` attribute/property exists on this model.
        incumbent_q = Membership.objects.filter(
            post=self.post,
            person=self.person,
        ).annotate(
            # Cast end_date's value as an int, treat '' as NULL
            # NOTE(review): Case() has no default/output_field here, so
            # non-empty end_date values may not cast as intended — verify the
            # annotation against real data.
            end_year=Cast(
                Case(When(end_date='', then=None)),
                IntegerField(),
            )
        ).filter(
            Q(end_year__gt=self.election.date.year)
            | Q(end_date='')
        )
        if incumbent_q.exists():
            return True
        else:
            return False
class OCDCandidacySourceProxy(CandidacySource, OCDProxyModelMixin):
    """
    A proxy on the OCD CandidacySource model.
    """
    # Manager providing bulk PostgreSQL COPY loading helpers.
    objects = BulkLoadSQLManager()

    class Meta:
        """
        Make this a proxy model.
        """
        app_label = "calaccess_processed_elections"
        proxy = True
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import json
import unittest
from urllib.parse import quote_plus
from airflow import configuration
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagModel, DagRun, Pool, TaskInstance
from airflow.settings import Session
from airflow.utils.timezone import datetime, utcnow
from airflow.www import app as application
class TestApiExperimental(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Purge any leftover DagRun/TaskInstance rows before the suite runs."""
        super(TestApiExperimental, cls).setUpClass()
        session = Session()
        session.query(DagRun).delete()
        session.query(TaskInstance).delete()
        session.commit()
        session.close()
    def setUp(self):
        """Create a fresh Flask test client against the test configuration."""
        super(TestApiExperimental, self).setUp()
        configuration.load_test_config()
        app = application.create_app(testing=True)
        self.app = app.test_client()
    def tearDown(self):
        """Remove DagRun/TaskInstance rows created by the test."""
        session = Session()
        session.query(DagRun).delete()
        session.query(TaskInstance).delete()
        session.commit()
        session.close()
        super(TestApiExperimental, self).tearDown()
    def test_task_info(self):
        """GET task info: 200 for a real dag/task, 404 for unknown ones."""
        url_template = '/api/experimental/dags/{}/tasks/{}'
        response = self.app.get(
            url_template.format('example_bash_operator', 'runme_0')
        )
        self.assertIn('"email"', response.data.decode('utf-8'))
        self.assertNotIn('error', response.data.decode('utf-8'))
        self.assertEqual(200, response.status_code)
        # Unknown task on a known DAG.
        response = self.app.get(
            url_template.format('example_bash_operator', 'DNE')
        )
        self.assertIn('error', response.data.decode('utf-8'))
        self.assertEqual(404, response.status_code)
        # Unknown DAG altogether.
        response = self.app.get(
            url_template.format('DNE', 'DNE')
        )
        self.assertIn('error', response.data.decode('utf-8'))
        self.assertEqual(404, response.status_code)
    def test_trigger_dag(self):
        """POST a dag_run: 200 for an existing DAG, 404 for a missing one."""
        url_template = '/api/experimental/dags/{}/dag_runs'
        # Unique run_id via the current timestamp.
        response = self.app.post(
            url_template.format('example_bash_operator'),
            data=json.dumps({'run_id': 'my_run' + utcnow().isoformat()}),
            content_type="application/json"
        )
        self.assertEqual(200, response.status_code)
        response = self.app.post(
            url_template.format('does_not_exist_dag'),
            data=json.dumps({}),
            content_type="application/json"
        )
        self.assertEqual(404, response.status_code)
    def test_delete_dag(self):
        """DELETE a DAG: 200 for a registered dag_id, 404 for a missing one."""
        url_template = '/api/experimental/dags/{}'
        from airflow import settings
        session = settings.Session()
        # Register a bare DagModel row to delete.
        key = "my_dag_id"
        session.add(DagModel(dag_id=key))
        session.commit()
        response = self.app.delete(
            url_template.format(key),
            content_type="application/json"
        )
        self.assertEqual(200, response.status_code)
        response = self.app.delete(
            url_template.format('does_not_exist_dag'),
            content_type="application/json"
        )
        self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
hour_from_now = utcnow() + timedelta(hours=1)
execution_date = datetime(hour_from_now.year,
hour_from_now.month,
hour_from_now.day,
hour_from_now.hour)
datetime_string = execution_date.isoformat()
# Test Correct execution
response = self.app.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.app.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': execution_date.isoformat()}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.app.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
def test_task_instance_info(self):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.app.get(
url_template.format(dag_id, datetime_string, task_id)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.app.get(
url_template.format('does_not_exist_dag', datetime_string,
task_id),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent task
response = self.app.get(
url_template.format(dag_id, datetime_string, 'does_not_exist_task')
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.app.get(
url_template.format(dag_id, wrong_datetime_string, task_id)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.app.get(
url_template.format(dag_id, 'not_a_datetime', task_id)
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
class TestPoolApiExperimental(unittest.TestCase):
    """Exercise the experimental pool CRUD endpoints."""

    @classmethod
    def setUpClass(cls):
        super(TestPoolApiExperimental, cls).setUpClass()
        # Remove any pools left behind by earlier suites.
        session = Session()
        session.query(Pool).delete()
        session.commit()
        session.close()

    def setUp(self):
        super(TestPoolApiExperimental, self).setUp()
        configuration.load_test_config()
        self.app = application.create_app(testing=True).test_client()
        self.session = Session()

        # Seed two pools: experimental_1 (0 slots) and experimental_2 (1 slot).
        self.pools = []
        for i in range(2):
            name = 'experimental_%s' % (i + 1)
            pool = Pool(pool=name, slots=i, description=name)
            self.session.add(pool)
            self.pools.append(pool)
        self.session.commit()
        self.pool = self.pools[0]

    def tearDown(self):
        self.session.query(Pool).delete()
        self.session.commit()
        self.session.close()
        super(TestPoolApiExperimental, self).tearDown()

    def _get_pool_count(self):
        """Return how many pools the list endpoint currently reports."""
        resp = self.app.get('/api/experimental/pools')
        self.assertEqual(resp.status_code, 200)
        return len(json.loads(resp.data.decode('utf-8')))

    def test_get_pool(self):
        resp = self.app.get(
            '/api/experimental/pools/{}'.format(self.pool.pool),
        )
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(json.loads(resp.data.decode('utf-8')),
                         self.pool.to_json())

    def test_get_pool_non_existing(self):
        resp = self.app.get('/api/experimental/pools/foo')
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(json.loads(resp.data.decode('utf-8'))['error'],
                         "Pool 'foo' doesn't exist")

    def test_get_pools(self):
        resp = self.app.get('/api/experimental/pools')
        self.assertEqual(resp.status_code, 200)
        listed = json.loads(resp.data.decode('utf-8'))
        self.assertEqual(len(listed), 2)
        # Compare against the seeded pools, which sort in name order.
        ordered = sorted(listed, key=lambda p: p['pool'])
        for expected, actual in zip(self.pools, ordered):
            self.assertDictEqual(actual, expected.to_json())

    def test_create_pool(self):
        payload = {
            'name': 'foo',
            'slots': 1,
            'description': '',
        }
        resp = self.app.post(
            '/api/experimental/pools',
            data=json.dumps(payload),
            content_type='application/json',
        )
        self.assertEqual(resp.status_code, 200)
        created = json.loads(resp.data.decode('utf-8'))
        self.assertEqual(created['pool'], 'foo')
        self.assertEqual(created['slots'], 1)
        self.assertEqual(created['description'], '')
        self.assertEqual(self._get_pool_count(), 3)

    def test_create_pool_with_bad_name(self):
        # Both empty and whitespace-only names must be rejected.
        for bad_name in ('', ' '):
            resp = self.app.post(
                '/api/experimental/pools',
                data=json.dumps({
                    'name': bad_name,
                    'slots': 1,
                    'description': '',
                }),
                content_type='application/json',
            )
            self.assertEqual(resp.status_code, 400)
            self.assertEqual(
                json.loads(resp.data.decode('utf-8'))['error'],
                "Pool name shouldn't be empty",
            )
        self.assertEqual(self._get_pool_count(), 2)

    def test_delete_pool(self):
        resp = self.app.delete(
            '/api/experimental/pools/{}'.format(self.pool.pool),
        )
        self.assertEqual(resp.status_code, 200)
        # The deleted pool is echoed back in the response body.
        self.assertEqual(json.loads(resp.data.decode('utf-8')),
                         self.pool.to_json())
        self.assertEqual(self._get_pool_count(), 1)

    def test_delete_pool_non_existing(self):
        resp = self.app.delete(
            '/api/experimental/pools/foo',
        )
        self.assertEqual(resp.status_code, 404)
        self.assertEqual(json.loads(resp.data.decode('utf-8'))['error'],
                         "Pool 'foo' doesn't exist")
# Allow this test module to be executed directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Widen a batch of ``paypal_pdt`` text columns (South migration).

    ``forwards`` widens every listed column to 255 characters (the
    EmailField ``receiver_email`` to 254); ``backwards`` restores the
    original, narrower lengths.
    """

    # (column, widened max_length, original max_length) for every column
    # this migration touches, in the order South generated the ALTER
    # statements.  ``receiver_email`` is the only EmailField; every other
    # column is a plain CharField.
    _ALTERED_COLUMNS = [
        ('case_id', 255, 14),
        ('transaction_entity', 255, 7),
        ('address_status', 255, 11),
        ('reason_code', 255, 15),
        ('profile_status', 255, 32),
        ('txn_id', 255, 19),
        ('payment_status', 255, 17),
        ('payer_status', 255, 10),
        ('period_type', 255, 32),
        ('pending_reason', 255, 14),
        ('payment_type', 255, 7),
        ('recurring_payment_id', 255, 128),
        ('protection_eligibility', 255, 32),
        ('receiver_email', 254, 127),
        ('txn_type', 255, 128),
        ('auth_status', 255, 9),
        ('payment_cycle', 255, 32),
        ('charset', 255, 32),
        ('product_name', 255, 128),
        ('receiver_id', 255, 127),
        ('period2', 255, 32),
        ('period3', 255, 32),
        ('period1', 255, 32),
        ('product_type', 255, 128),
        ('receipt_id', 255, 64),
        ('case_type', 255, 24),
    ]

    def _alter_column_lengths(self, use_new):
        """Run ``db.alter_column`` for every entry in ``_ALTERED_COLUMNS``.

        :param use_new: apply the widened length when True (forwards),
            the original pre-migration length when False (backwards).
        """
        for column, new_length, old_length in self._ALTERED_COLUMNS:
            if column == 'receiver_email':
                field_path = 'django.db.models.fields.EmailField'
            else:
                field_path = 'django.db.models.fields.CharField'
            length = new_length if use_new else old_length
            db.alter_column(u'paypal_pdt', column,
                            self.gf(field_path)(max_length=length))

    def forwards(self, orm):
        """Widen all listed PayPalPDT columns."""
        self._alter_column_lengths(use_new=True)

    def backwards(self, orm):
        """Shrink the columns back to their pre-migration lengths."""
        self._alter_column_lengths(use_new=False)

    # Frozen ORM state emitted by South; left exactly as generated.
    models = {
        u'pdt.paypalpdt': {
            'Meta': {'object_name': 'PayPalPDT', 'db_table': "u'paypal_pdt'"},
            'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'address_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'amt': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'auction_buyer_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}),
            'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'auth_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'case_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'case_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'charset': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'cm': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
            'custom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'ipaddress': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
            'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
            'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'mp_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'notify_version': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'num_cart_items': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'option_name1': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'option_name2': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'outstanding_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'parent_txn_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'payer_business_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'payer_email': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'payer_id': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
            'payer_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'payment_cycle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'payment_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'pending_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'period_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'product_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'product_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'profile_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'protection_eligibility': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
            'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'reason_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'reattempt': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'receiver_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'recur_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'recurring': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
            'recurring_payment_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'remaining_settle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'residence_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'retry_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rp_invoice_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
            'settle_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'sig': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'st': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'subscr_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'subscr_effective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'subscr_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
            'tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
            'test_ipn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'transaction_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'transaction_subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'tx': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'txn_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'verify_sign': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['pdt']
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
# ZeroMQ is imported optionally: try_import returns None (instead of
# raising ImportError) when the eventlet-green binding is unavailable.
zmq = importutils.try_import('eventlet.green.zmq')

# Module-level aliases of frequently used names; provided for convenience
# and never modified here.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
# Configuration options for the ZeroMQ RPC driver; registered on the
# global oslo.config CONF object below.
zmq_opts = [
    cfg.StrOpt('rpc_zmq_bind_address', default='*',
               help='ZeroMQ bind address. Should be a wildcard (*), '
                    'an ethernet interface, or IP. '
                    'The "host" option should point or resolve to this '
                    'address.'),

    # The module.Class to use for matchmaking.
    cfg.StrOpt(
        'rpc_zmq_matchmaker',
        default=('nova.openstack.common.rpc.'
                 'matchmaker.MatchMakerLocalhost'),
        help='MatchMaker driver',
    ),

    # The following port is unassigned by IANA as of 2012-05-21
    cfg.IntOpt('rpc_zmq_port', default=9501,
               help='ZeroMQ receiver listening port'),

    cfg.IntOpt('rpc_zmq_contexts', default=1,
               help='Number of ZeroMQ contexts, defaults to 1'),

    cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
               help='Maximum number of ingress messages to locally buffer '
                    'per topic. Default is unlimited.'),

    cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
               help='Directory for holding IPC sockets'),

    # NOTE: the default is evaluated once at import time.
    cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
               help='Name of this node. Must be a valid hostname, FQDN, or '
                    'IP address. Must match "host" option, if running Nova.')
]

CONF = cfg.CONF
CONF.register_opts(zmq_opts)

ZMQ_CTX = None  # ZeroMQ Context, must be global.
matchmaker = None  # memoized matchmaker object
def _serialize(data):
    """Encode *data* as a JSON string.

    JSON is preferred because it is language neutral, but it cannot
    encode every type; a TypeError from the encoder is logged and then
    re-raised so bad developer input fails loudly.
    """
    try:
        serialized = jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        # Log first; save_and_reraise_exception re-raises the original.
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
    else:
        return serialized
def _deserialize(data):
    """Decode a JSON string (as produced by _serialize) back into data."""
    LOG.debug(_("Deserializing: %s"), data)
    decoded = jsonutils.loads(data)
    return decoded
class ZmqSocket(object):
    """A tiny wrapper around ZeroMQ.

    Simplifies the send/recv protocol and connection management.

    Can be used as a Context (supports the 'with' statement).
    """
    # NOTE(review): no __enter__/__exit__ are visible in this chunk despite
    # the docstring's 'with' claim -- confirm against the full file.

    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        # :param addr: ZeroMQ endpoint string (e.g. tcp:// or ipc://).
        # :param zmq_type: socket type constant (zmq.PUSH, zmq.SUB, ...).
        # :param bind: bind to addr when True, otherwise connect to it.
        # :param subscribe: filter(s) for SUB sockets: list, str, or None.
        # :raises RPCException: if binding/connecting the socket fails.
        # NOTE(review): ``zmq`` may be None if the optional import failed;
        # _get_ctxt is defined elsewhere in this module.
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list).
        # NOTE: this is an exact-type dispatch; any other type (e.g. a
        # py2 unicode filter) raises KeyError here.
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))

    def socket_s(self):
        """Get socket type as string."""
        # Reverse-map the zmq type constant back to its symbolic name.
        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
                  'DEALER')
        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]

    def subscribe(self, msg_filter):
        """Subscribe this SUB socket to ``msg_filter``.

        Best-effort: a failing setsockopt is silently ignored and the
        filter is then NOT recorded in ``self.subscriptions``.
        """
        if not self.can_sub:
            raise RPCException("Cannot subscribe on this socket.")
        LOG.debug(_("Subscribing to %s"), msg_filter)

        try:
            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
        except Exception:
            return

        self.subscriptions.append(msg_filter)

    def unsubscribe(self, msg_filter):
        """Remove a previously recorded subscription; unknown filters no-op."""
        if msg_filter not in self.subscriptions:
            return
        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
        self.subscriptions.remove(msg_filter)

    def close(self):
        # Idempotent: safe to call on an already-closed/None socket.
        if self.sock is None or self.sock.closed:
            return

        # We must unsubscribe, or we'll leak descriptors.
        if self.subscriptions:
            for f in self.subscriptions:
                try:
                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
                except Exception:
                    pass
            self.subscriptions = []

        try:
            # Default is to linger
            self.sock.close()
        except Exception:
            # While this is a bad thing to happen,
            # it would be much worse if some of the code calling this
            # were to fail. For now, lets log, and later evaluate
            # if we can safely raise here.
            LOG.error("ZeroMQ socket could not be closed.")
        self.sock = None

    def recv(self, **kwargs):
        """Receive a multipart message; only valid on PULL/SUB sockets."""
        if not self.can_recv:
            raise RPCException(_("You cannot recv on this socket."))
        return self.sock.recv_multipart(**kwargs)

    def send(self, data, **kwargs):
        """Send a multipart message; only valid on PUSH/PUB sockets."""
        if not self.can_send:
            raise RPCException(_("You cannot send on this socket."))
        self.sock.send_multipart(data, **kwargs)
class ZmqClient(object):
    """Client for ZMQ sockets."""

    def __init__(self, addr):
        # One-way PUSH socket connected (not bound) to the remote endpoint.
        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)

    def cast(self, msg_id, topic, data, envelope):
        """Send a one-way message.

        Without an envelope the legacy 'cast' frame layout is used;
        with one, the envelope dict is flattened into alternating
        key/value frames under the 'impl_zmq_v2' marker.

        NOTE(review): `map(bytes, ...)`, `reduce` and tuple-concatenation
        of `dict.items()` are Python 2 idioms; `data` appears to be the
        two-element [context, payload] list built by _cast -- confirm.
        """
        msg_id = msg_id or 0

        if not envelope:
            self.outq.send(map(bytes,
                           (msg_id, topic, 'cast', _serialize(data))))
            return

        rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
        # Flatten {k1: v1, k2: v2} into (k1, v1, k2, v2) message frames.
        zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
        self.outq.send(map(bytes,
                       (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))

    def close(self):
        self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call."""

    def __init__(self, **kwargs):
        # Replies recorded by handlers via reply(); drained by callers.
        self.replies = []
        super(RpcContext, self).__init__(**kwargs)

    def deepcopy(self):
        """Return a copy of this context.

        NOTE(review): ``replies`` is deliberately shared (not copied), so
        replies recorded on the copy remain visible to the original.
        """
        values = self.to_dict()
        values['replies'] = self.replies
        return self.__class__(**values)

    def reply(self, reply=None, failure=None, ending=False):
        """Record a reply payload; ``ending`` markers carry no payload.

        ``failure`` is accepted for interface compatibility but unused here.
        """
        if ending:
            return
        self.replies.append(reply)

    @classmethod
    def marshal(cls, ctx):
        """Serialize *ctx* to a JSON string for the wire."""
        # Fixed: classmethod first argument renamed self -> cls (idiom).
        ctx_data = ctx.to_dict()
        return _serialize(ctx_data)

    @classmethod
    def unmarshal(cls, data):
        """Rebuild an RpcContext from its serialized (JSON) form."""
        return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
    """Used by ConsumerBase as a private context for - methods."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.msg_waiter = None

    def _get_response(self, ctx, proxy, topic, data):
        """Process a curried message and cast the result to topic."""
        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
        data.setdefault('version', None)
        data.setdefault('args', {})

        try:
            result = proxy.dispatch(
                ctx, data['version'], data['method'],
                data.get('namespace'), **data['args'])
            return ConsumerBase.normalize_reply(result, ctx.replies)
        except greenlet.GreenletExit:
            # ignore these since they are just from shutdowns
            pass
        except rpc_common.ClientException as e:
            # Expected application-level failure: serialize without
            # logging it as an error.
            LOG.debug(_("Expected exception during message handling (%s)") %
                      e._exc_info[1])
            return {'exc':
                    rpc_common.serialize_remote_exception(e._exc_info,
                                                          log_failure=False)}
        except Exception:
            LOG.error(_("Exception during message handling"))
            return {'exc':
                    rpc_common.serialize_remote_exception(sys.exc_info())}

    def reply(self, ctx, proxy,
              msg_id=None, context=None, topic=None, msg=None):
        """Reply to a casted call."""
        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
        #                  this may be able to be removed earlier than
        #                  'I' if ConsumerBase.process were refactored.
        if type(msg) is list:
            payload = msg[-1]
        else:
            payload = msg

        response = ConsumerBase.normalize_reply(
            self._get_response(ctx, proxy, topic, payload),
            ctx.replies)

        LOG.debug(_("Sending reply"))
        # Cast the result back to the caller's reply topic.
        _multi_send(_cast, ctx, topic, {
            'method': '-process_reply',
            'args': {
                'msg_id': msg_id,  # Include for Folsom compat.
                'response': response
            }
        }, _msg_id=msg_id)
class ConsumerBase(object):
    """Base consumer: dispatches decoded RPC messages to a proxy object."""

    def __init__(self):
        # Private context used only for internal ('-'-prefixed) methods.
        self.private_ctx = InternalContext(None)

    @classmethod
    def normalize_reply(cls, result, replies):
        """Normalize a dispatch result into a list of replies.

        Generators are fully drained into a list; explicit replies
        recorded on the context take precedence over the return value;
        otherwise the bare result is wrapped in a single-element list.
        (Fixed: classmethod first argument renamed self -> cls.)
        """
        #TODO(ewindisch): re-evaluate and document this method.
        if isinstance(result, types.GeneratorType):
            return list(result)
        elif replies:
            return replies
        else:
            return [result]

    def process(self, proxy, ctx, data):
        """Dispatch one decoded message dict to *proxy*."""
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Method starting with - are
        # processed internally. (non-valid method name)
        method = data.get('method')
        if not method:
            LOG.error(_("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], data.get('namespace'), **data['args'])
class ZmqBaseReactor(ConsumerBase):
    """A consumer class implementing a centralized casting broker (PULL-PUSH).

    Used for RoundRobin requests.
    """

    def __init__(self, conf):
        super(ZmqBaseReactor, self).__init__()

        # input socket -> proxy object handling its messages
        self.proxies = {}
        self.threads = []
        self.sockets = []
        self.subscribe = {}

        # Green thread pool sized by the rpc_thread_pool_size option.
        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)

    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):
        """Open an input socket on *in_addr* and route it to *proxy*."""
        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))

    def consume_in_thread(self):
        """Spawn one consuming green thread per registered socket."""
        @excutils.forever_retry_uncaught_exceptions
        def _consume(sock):
            LOG.info(_("Consuming socket"))
            while True:
                self.consume(sock)

        for k in self.proxies.keys():
            self.threads.append(
                self.pool.spawn(_consume, k)
            )

    def wait(self):
        """Block until all consumer threads finish."""
        for t in self.threads:
            t.wait()

    def close(self):
        """Close all sockets and kill the consumer threads."""
        for s in self.sockets:
            s.close()

        for t in self.threads:
            t.kill()
class ZmqProxy(ZmqBaseReactor):
    """A consumer class implementing a topic-based proxy.

    Forwards to IPC sockets.
    """

    def __init__(self, conf):
        super(ZmqProxy, self).__init__(conf)
        # Any path-separator-like character is dangerous in a topic name
        # because topics are interpolated into IPC socket paths below.
        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))

        # topic -> LightQueue feeding that topic's out socket
        self.topic_proxy = {}

    def consume(self, sock):
        """Forward one inbound message onto the per-topic IPC socket,
        lazily creating the socket (in its own green thread) on first use.
        """
        ipc_dir = CONF.rpc_zmq_ipc_dir

        data = sock.recv(copy=False)
        topic = data[1].bytes

        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
            topic = topic.split('.', 1)[0]
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                # Signal the waiting consume() that the socket is ready.
                waiter.send(True)

                # Drain the per-topic queue forever.
                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_("Topic socket file creation failed."))
                return

        try:
            # Non-blocking: drop the message rather than stall the broker.
            self.topic_proxy[topic].put_nowait(data)
        except eventlet.queue.Full:
            LOG.error(_("Local per-topic backlog buffer full for topic "
                        "%(topic)s. Dropping message.") % {'topic': topic})

    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            # Directory may already exist; anything else is fatal.
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s") % (ipc_dir, ))
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            # Distinguish permission problems from address-in-use.
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s") % (ipc_dir, ))
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
    """Unflattens the RPC envelope.

    Takes a list and returns a dictionary.
    i.e. [1,2,3,4] => {1: 2, 3: 4}

    A trailing unpaired element is silently dropped, matching the
    original behavior.
    """
    i = iter(packenv)
    h = {}
    try:
        while True:
            # Fixed: use the builtin next() instead of the Python-2-only
            # iterator.next() method, for 2.6+/3.x compatibility.
            k = next(i)
            h[k] = next(i)
    except StopIteration:
        return h
class ZmqReactor(ZmqBaseReactor):
    """A consumer class implementing a consumer for messages.

    Can also be used as a 1:1 proxy
    """

    def __init__(self, conf):
        super(ZmqReactor, self).__init__(conf)

    def consume(self, sock):
        """Decode one message from *sock* and dispatch it on the pool."""
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            # v2 layout: frames 4+ are the flattened envelope.
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
    """Manages connections and threads."""

    def __init__(self, conf):
        self.topics = []
        self.reactor = ZmqReactor(conf)

    def create_consumer(self, topic, proxy, fanout=False):
        """Register a consumer for *topic*, optionally fanout-style."""
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            # A string fanout value becomes the SUB filter; True means
            # subscribe to everything ('').
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            # Host-qualify the topic so only this host consumes it.
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)

    def close(self):
        """Unregister all topics and shut down the reactor."""
        _get_matchmaker().stop_heartbeat()
        for topic in self.topics:
            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)

        self.reactor.close()
        self.topics = []

    def wait(self):
        self.reactor.wait()

    def consume_in_thread(self):
        _get_matchmaker().start_heartbeat()
        self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
          _msg_id=None):
    """Send a one-way message to *addr* over a fresh PUSH socket,
    bounded by the cast timeout.
    """
    timeout_cast = timeout or CONF.rpc_cast_timeout
    payload = [RpcContext.marshal(context), msg]

    with Timeout(timeout_cast, exception=rpc_common.Timeout):
        try:
            conn = ZmqClient(addr)

            # assumes cast can't return an exception
            conn.cast(_msg_id, topic, payload, envelope)
        except zmq.ZMQError:
            raise RPCException("Cast failed. ZMQ Socket Exception")
        finally:
            # Only close if the client was actually constructed.
            if 'conn' in vars():
                conn.close()
def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    """Send *msg* to *addr* and block for the reply, returning the last
    response. Raises Timeout, RPCException or a deserialized remote error.
    """
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))

    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'topic': reply_topic,
            # TODO(ewindisch): safe to remove mcontext in I.
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            # SUB socket filtered on our msg_id so we only see our reply.
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            # Only close if the waiter socket was actually constructed.
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    # NOTE(review): types.DictType is Python 2 only.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    # NOTE(review): both branches return inside the loop, so only the
    # first matchmaker queue is ever used here -- confirm whether fanout
    # is expected to be handled upstream (e.g. by the proxy/PUB socket).
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
def create_connection(conf, new=True):
    """Create a Connection; `new` is accepted for driver API
    compatibility but ignored by this implementation."""
    return Connection(conf)
def multicall(conf, *args, **kwargs):
    """Multiple calls: send a message and return all responses."""
    return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
    """Send a message, expect a response."""
    data = _multi_send(_call, *args, **kwargs)
    # Only the final response matters for a plain call.
    return data[-1]
def cast(conf, *args, **kwargs):
    """Send a message expecting no reply."""
    _multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
    """Send a message to all listening and expect no reply."""
    # NOTE(ewindisch): fanout~ is used because it avoid splitting on .
    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
    """Send notification event.

    Notifications are sent to topic-priority.
    This differs from the AMQP drivers which send to topic.priority.
    """
    # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
    topic = topic.replace('.', '-')
    cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
    """Clean up resources in use by implementation."""
    # Terminate the shared ZeroMQ context and reset module globals so a
    # later call re-creates them lazily.
    global ZMQ_CTX
    if ZMQ_CTX:
        ZMQ_CTX.term()
    ZMQ_CTX = None

    global matchmaker
    matchmaker = None
def _get_ctxt():
    """Return the process-wide ZeroMQ context, creating it lazily."""
    if not zmq:
        raise ImportError("Failed to import eventlet.green.zmq")

    global ZMQ_CTX
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
    return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
    """Return the global matchmaker, lazily importing the class named by
    ``CONF.rpc_zmq_matchmaker`` on first use.

    The legacy ``matchmaker.MatchMakerRing`` path is rewritten to the new
    ``matchmaker_ring`` module with a deprecation warning.
    """
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            # BUG FIX: str.replace() returns a new string; the original
            # code discarded the result, so the deprecated path was never
            # actually rewritten before import.
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                     orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
|
|
#!/usr/bin/env python
#
# Copyright 2006-2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is a utility for converting JavaScript source code into uint16_t[],
that are used for embedding JavaScript code into the Node.js binary.
"""
import argparse
import os
import re
import functools
import codecs
def ReadFile(filename):
    """Read *filename* as UTF-8 text and return its full contents."""
    # `is_verbose` is a module-level flag set from the --verbose option.
    if is_verbose:
        print(filename)
    with codecs.open(filename, "r", "utf-8") as f:
        lines = f.read()
    return lines
# C++ file skeleton: {0} = array definitions, {1} = per-module
# registration calls, {2} = length of the embedded config.gypi array.
TEMPLATE = """
#include "env-inl.h"
#include "node_native_module.h"
#include "node_internals.h"
namespace node {{
namespace native_module {{
{0}
void NativeModuleLoader::LoadJavaScriptSource() {{
{1}
}}
UnionBytes NativeModuleLoader::GetConfig() {{
return UnionBytes(config_raw, {2}); // config.gypi
}}
}} // namespace native_module
}} // namespace node
"""

# Array definition for ASCII-only sources (one byte per code point).
ONE_BYTE_STRING = """
static const uint8_t {0}[] = {{
{1}
}};
"""

# Array definition for non-ASCII sources (UTF-16LE code units).
TWO_BYTE_STRING = """
static const uint16_t {0}[] = {{
{1}
}};
"""
# Registration line emitted per module: name, array variable, length.
INITIALIZER = 'source_.emplace("{0}", UnionBytes{{{1}, {2}}});'

# Variable name used for the embedded config.gypi contents.
CONFIG_GYPI_ID = 'config_raw'

# Characters replaced with '_' when turning a module name into a C
# identifier. Fixed: raw string -- the non-raw '[.\-/]' contains the
# invalid escape '\-', a DeprecationWarning since Python 3.6.
SLUGGER_RE = re.compile(r'[.\-/]')

# Set from the --verbose command-line flag in main().
is_verbose = False
def GetDefinition(var, source, step=30):
    """Render *source* as a C array definition named *var*.

    Returns ``(definition, length)``.  ASCII-only sources are emitted as
    ``uint8_t``; anything else is encoded as UTF-16LE code units and
    emitted as ``uint16_t``.
    """
    code_points = [ord(ch) for ch in source]
    if all(cp <= 127 for cp in code_points):
        template = ONE_BYTE_STRING
    else:
        template = TWO_BYTE_STRING
        # Treat non-ASCII as UTF-8 and re-encode as UTF-16 Little Endian:
        # combine each byte pair (low byte first) into one code unit.
        raw = bytearray(source, 'utf-16le')
        code_points = [raw[i] | (raw[i + 1] << 8)
                       for i in range(0, len(raw), 2)]
    # Right-align to 3 chars per code point for easier debugging.
    cells = ['%3s' % cp for cp in code_points]
    # Emit at most `step` code points per line.
    lines = [','.join(cells[i:i + step])
             for i in range(0, len(cells), step)]
    definition = template.format(var, ',\n'.join(lines))
    return definition, len(code_points)
def AddModule(filename, definitions, initializers):
    """Append the array definition and registration line for one JS file
    to the *definitions* and *initializers* output lists (in place).
    """
    code = ReadFile(filename)
    name = NormalizeFileName(filename)
    # Turn the module name into a valid C identifier, e.g. fs/promises -> fs_promises_raw.
    slug = SLUGGER_RE.sub('_', name)
    var = slug + '_raw'
    definition, size = GetDefinition(var, code)
    initializer = INITIALIZER.format(name, var, size)
    definitions.append(definition)
    initializers.append(initializer)
def NormalizeFileName(filename):
    """Map a source path to its module name.

    `deps/**` files are prefixed with `internal`; `lib/**` files drop the
    leading `lib` component.  The extension is always stripped and '/' is
    used as the joiner regardless of platform.
    """
    parts = filename.split(os.path.sep)
    if parts[0] == 'deps':
        parts = ['internal'] + parts
    else:  # `lib/**/*.js` so drop the 'lib' part
        parts = parts[1:]
    if parts:
        filename = '/'.join(parts)
    return os.path.splitext(filename)[0]
def JS2C(source_files, target):
    """Generate the C++ file embedding every JS source plus config.gypi
    and write it to *target* (only if its contents changed).
    """
    # Build source code lines
    definitions = []
    initializers = []

    for filename in source_files['.js']:
        AddModule(filename, definitions, initializers)

    config_def, config_size = handle_config_gypi(source_files['config.gypi'])
    definitions.append(config_def)

    # Emit result
    definitions = ''.join(definitions)
    initializers = '\n '.join(initializers)
    out = TEMPLATE.format(definitions, initializers, config_size)
    write_if_chaged(out, target)
def handle_config_gypi(config_filename):
    """Read config.gypi, convert it to JSON text and return its C array
    definition and length.
    """
    # if its a gypi file we're going to want it as json
    # later on anyway, so get it out of the way now
    config = ReadFile(config_filename)
    config = jsonify(config)
    config_def, config_size = GetDefinition(CONFIG_GYPI_ID, config)
    return config_def, config_size
def jsonify(config):
    """Convert a gypi Python-literal dump into JSON-compatible text."""
    # 1. strip '#' comments (gypi allows them, JSON does not)
    config = re.sub(r'#.*?\n', '', config)
    # 2. normalize string literals from ' into "
    config = config.replace("'", '"')
    # 3. turn pseudo-boolean strings into real JSON booleans
    config = config.replace('"true"', 'true')
    return config.replace('"false"', 'false')
def write_if_chaged(content, target):
    """Write *content* to *target* only when it differs from what is
    already on disk; otherwise just bump the file's timestamps.

    (The misspelled name is kept for compatibility with callers.)
    """
    old_content = ''
    if os.path.exists(target):
        with open(target, 'rt') as existing:
            old_content = existing.read()
    if old_content == content:
        # Unchanged: refresh mtime so build systems see it as up to date.
        os.utime(target, None)
        return
    with open(target, "wt") as output:
        output.write(content)
def SourceFileByExt(files_by_ext, filename):
    """Reducer: bucket *filename* into *files_by_ext* keyed by extension.

    :type files_by_ext: dict
    :type filename: str
    :rtype: dict
    """
    extension = os.path.splitext(filename)[-1]
    bucket = files_by_ext.setdefault(extension, [])
    bucket.append(filename)
    return files_by_ext
def main():
    """Parse command-line options and run the JS -> C++ conversion."""
    parser = argparse.ArgumentParser(
        description='Convert code files into `uint16_t[]`s',
        fromfile_prefix_chars='@'
    )
    parser.add_argument('--target', help='output file')
    # BUG FIX: --verbose's help text was copy-pasted from --target
    # ("output file"); describe what the flag actually does.
    parser.add_argument('--verbose', action='store_true',
                        help='print file names as they are processed')
    parser.add_argument('sources', nargs='*', help='input files')
    options = parser.parse_args()

    global is_verbose
    is_verbose = options.verbose

    # Bucket the input files by extension.
    source_files = functools.reduce(SourceFileByExt, options.sources, {})
    # Should have exactly 2 types: `.js`, and `.gypi`
    assert len(source_files) == 2
    # Currently config.gypi is the only `.gypi` file allowed
    assert source_files['.gypi'] == ['config.gypi']
    source_files['config.gypi'] = source_files.pop('.gypi')[0]
    JS2C(source_files, options.target)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bits and pieces used by the driver that don't really fit elsewhere."""
import collections
import datetime
import struct
import bson
from bson.codec_options import CodecOptions
from bson.py3compat import itervalues, string_type, iteritems, u
from bson.son import SON
from pymongo import ASCENDING
from pymongo.errors import (CursorNotFound,
DuplicateKeyError,
ExecutionTimeout,
NotMasterError,
OperationFailure,
WriteError,
WriteConcernError,
WTimeoutError)
from pymongo.message import _Query, _convert_exception
from pymongo.read_concern import DEFAULT_READ_CONCERN
# Unicode underscore used to join key/direction pairs into an index name.
_UUNDER = u("_")


def _gen_index_name(keys):
    """Generate an index name from the set of fields it is over."""
    # keys is an iterable of (name, direction) pairs, e.g. [("a", 1)]
    # becomes "a_1"; multiple pairs are joined with "_".
    return _UUNDER.join(["%s_%s" % item for item in keys])
def _index_list(key_or_list, direction=None):
"""Helper to generate a list of (key, direction) pairs.
Takes such a list, or a single key, or a single key and direction.
"""
if direction is not None:
return [(key_or_list, direction)]
else:
if isinstance(key_or_list, string_type):
return [(key_or_list, ASCENDING)]
elif not isinstance(key_or_list, (list, tuple)):
raise TypeError("if no direction is specified, "
"key_or_list must be an instance of list")
return key_or_list
def _index_document(index_list):
    """Helper to generate an index specifying document.

    Takes a list of (key, direction) pairs.
    """
    if isinstance(index_list, collections.Mapping):
        raise TypeError("passing a dict to sort/create_index/hint is not "
                        "allowed - use a list of tuples instead. did you "
                        "mean %r?" % list(iteritems(index_list)))
    elif not isinstance(index_list, (list, tuple)):
        raise TypeError("must use a list of (key, direction) pairs, "
                        "not: " + repr(index_list))
    if not len(index_list):
        raise ValueError("key_or_list must not be the empty list")

    # SON preserves insertion order, which matters for compound indexes.
    index = SON()
    for (key, value) in index_list:
        if not isinstance(key, string_type):
            raise TypeError("first item in each key pair must be a string")
        if not isinstance(value, (string_type, int, collections.Mapping)):
            raise TypeError("second item in each key pair must be 1, -1, "
                            "'2d', 'geoHaystack', or another valid MongoDB "
                            "index specifier.")
        index[key] = value
    return index
def _unpack_response(response, cursor_id=None, codec_options=CodecOptions()):
    """Unpack a response from the database.

    Check the response for errors and unpack, returning a dictionary
    containing the response data.

    Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or
    OperationFailure.

    :Parameters:
      - `response`: byte string as returned from the database
      - `cursor_id` (optional): cursor_id we sent to get this response -
        used for raising an informative exception when we get cursor id not
        valid at server response
      - `codec_options` (optional): an instance of
        :class:`~bson.codec_options.CodecOptions`
    """
    # First 4 bytes: little-endian OP_REPLY responseFlags bitfield.
    response_flag = struct.unpack("<i", response[:4])[0]
    if response_flag & 1:
        # Bit 0: CursorNotFound.
        # Shouldn't get this response if we aren't doing a getMore
        assert cursor_id is not None

        # Fake a getMore command response. OP_GET_MORE provides no document.
        msg = "Cursor not found, cursor id: %d" % (cursor_id,)
        errobj = {"ok": 0, "errmsg": msg, "code": 43}
        raise CursorNotFound(msg, 43, errobj)
    elif response_flag & 2:
        # Bit 1: QueryFailure -- body holds a single error document.
        error_object = bson.BSON(response[20:]).decode()
        # Fake the ok field if it doesn't exist.
        error_object.setdefault("ok", 0)
        if error_object["$err"].startswith("not master"):
            raise NotMasterError(error_object["$err"], error_object)
        elif error_object.get("code") == 50:
            raise ExecutionTimeout(error_object.get("$err"),
                                   error_object.get("code"),
                                   error_object)
        raise OperationFailure("database error: %s" %
                               error_object.get("$err"),
                               error_object.get("code"),
                               error_object)

    # Header layout after the flags: cursor id (int64), starting_from
    # (int32), number_returned (int32), then the BSON documents.
    result = {"cursor_id": struct.unpack("<q", response[4:12])[0],
              "starting_from": struct.unpack("<i", response[12:16])[0],
              "number_returned": struct.unpack("<i", response[16:20])[0],
              "data": bson.decode_all(response[20:], codec_options)}

    assert len(result["data"]) == result["number_returned"]
    return result
def _check_command_response(response, msg=None, allowable_errors=None):
    """Check the response to a command for errors.

    Raises the most specific matching exception (NotMasterError,
    WTimeoutError, DuplicateKeyError, ExecutionTimeout, CursorNotFound,
    OperationFailure); returns None when the response is ok.
    """
    if "ok" not in response:
        # Server didn't recognize our message as a command.
        raise OperationFailure(response.get("$err"),
                               response.get("code"),
                               response)

    # TODO: remove, this is moving to _check_gle_response
    if response.get("wtimeout", False):
        # MongoDB versions before 1.8.0 return the error message in an "errmsg"
        # field. If "errmsg" exists "err" will also exist set to None, so we
        # have to check for "errmsg" first.
        raise WTimeoutError(response.get("errmsg", response.get("err")),
                            response.get("code"),
                            response)

    if not response["ok"]:

        details = response
        # Mongos returns the error details in a 'raw' object
        # for some errors.
        if "raw" in response:
            for shard in itervalues(response["raw"]):
                # Grab the first non-empty raw error from a shard.
                if shard.get("errmsg") and not shard.get("ok"):
                    details = shard
                    break

        errmsg = details["errmsg"]
        if allowable_errors is None or errmsg not in allowable_errors:

            # Server is "not master" or "recovering"
            if (errmsg.startswith("not master")
                    or errmsg.startswith("node is recovering")):
                raise NotMasterError(errmsg, response)

            # Server assertion failures
            if errmsg == "db assertion failure":
                errmsg = ("db assertion failure, assertion: '%s'" %
                          details.get("assertion", ""))
                raise OperationFailure(errmsg,
                                       details.get("assertionCode"),
                                       response)

            # Other errors
            code = details.get("code")
            # findAndModify with upsert can raise duplicate key error
            if code in (11000, 11001, 12582):
                raise DuplicateKeyError(errmsg, code, response)
            elif code == 50:
                raise ExecutionTimeout(errmsg, code, response)
            elif code == 43:
                raise CursorNotFound(errmsg, code, response)

            # Fall through: generic failure, optionally wrapped in the
            # caller-supplied message template.
            msg = msg or "%s"
            raise OperationFailure(msg % errmsg, code, response)
def _check_gle_response(response):
    """Return getlasterror response as a dict, or raise OperationFailure."""
    response = _unpack_response(response)

    assert response["number_returned"] == 1
    result = response["data"][0]

    # Did getlasterror itself fail?
    _check_command_response(result)

    if result.get("wtimeout", False):
        # MongoDB versions before 1.8.0 return the error message in an "errmsg"
        # field. If "errmsg" exists "err" will also exist set to None, so we
        # have to check for "errmsg" first.
        raise WTimeoutError(result.get("errmsg", result.get("err")),
                            result.get("code"),
                            result)

    error_msg = result.get("err", "")
    if error_msg is None:
        # err explicitly null means no error.
        return result

    if error_msg.startswith("not master"):
        raise NotMasterError(error_msg, result)

    details = result
    # mongos returns the error code in an error object for some errors.
    if "errObjects" in result:
        for errobj in result["errObjects"]:
            if errobj.get("err") == error_msg:
                details = errobj
                break

    code = details.get("code")
    if code in (11000, 11001, 12582):
        raise DuplicateKeyError(details["err"], code, result)
    raise OperationFailure(details["err"], code, result)
def _first_batch(sock_info, db, coll, query, ntoreturn,
                 slave_ok, codec_options, read_preference, cmd, listeners):
    """Simple query helper for retrieving a first (and possibly only) batch."""
    query = _Query(
        0, db, coll, 0, ntoreturn, query, None,
        codec_options, read_preference, 0, 0, DEFAULT_READ_CONCERN)

    # Command name (first key of the cmd document) for monitoring events.
    name = next(iter(cmd))
    duration = None
    # Only take timing measurements when command monitoring is enabled.
    publish = listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, max_doc_size = query.get_message(slave_ok,
                                                      sock_info.is_mongos)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(
            cmd, db, request_id, sock_info.address)
        start = datetime.datetime.now()

    sock_info.send_message(msg, max_doc_size)
    response = sock_info.receive_message(1, request_id)

    try:
        result = _unpack_response(response, None, codec_options)
    except Exception as exc:
        if publish:
            # encoding_duration is only bound when publish is True, so
            # this access is safe under the same guard.
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = _convert_exception(exc)
            listeners.publish_command_failure(
                duration, failure, name, request_id, sock_info.address)
        raise

    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(
            duration, result, name, request_id, sock_info.address)

    return result
def _check_write_command_response(results):
    """Backward compatibility helper for write command error handling.

    *results* is a sequence of (offset, result-document) pairs, one per
    batch; raises for the last batch that reported an error.
    """
    errors = [res for res in results
              if "writeErrors" in res[1] or "writeConcernError" in res[1]]
    if errors:
        # If multiple batches had errors
        # raise from the last batch.
        offset, result = errors[-1]
        # Prefer write errors over write concern errors
        write_errors = result.get("writeErrors")
        if write_errors:
            # If the last batch had multiple errors only report
            # the last error to emulate continue_on_error.
            error = write_errors[-1]
            # Translate the batch-local index into the overall index.
            error["index"] += offset
            if error.get("code") == 11000:
                raise DuplicateKeyError(error.get("errmsg"), 11000, error)
            raise WriteError(error.get("errmsg"), error.get("code"), error)
        else:
            error = result["writeConcernError"]
            if "errInfo" in error and error["errInfo"].get('wtimeout'):
                # Make sure we raise WTimeoutError
                raise WTimeoutError(
                    error.get("errmsg"), error.get("code"), error)
            raise WriteConcernError(
                error.get("errmsg"), error.get("code"), error)
def _fields_list_to_dict(fields, option_name):
"""Takes a sequence of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
"""
if isinstance(fields, collections.Mapping):
return fields
if isinstance(fields, collections.Sequence):
if not all(isinstance(field, string_type) for field in fields):
raise TypeError("%s must be a list of key names, each an "
"instance of %s" % (option_name,
string_type.__name__))
return dict.fromkeys(fields, 1)
raise TypeError("%s must be a mapping or "
"list of key names" % (option_name,))
|
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Project Management module views
"""
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.db.models import Q
from treeio.core.models import Object, ModuleSetting, UpdateRecord
from treeio.core.views import user_denied
from treeio.core.rendering import render_to_response
from treeio.core.decorators import treeio_login_required, handle_response_format
from treeio.projects.models import Project, Milestone, Task, TaskStatus, TaskTimeSlot
from treeio.projects.forms import ProjectForm, MilestoneForm, TaskForm, FilterForm, TaskRecordForm, \
MassActionForm, TaskTimeSlotForm, TaskStatusForm, SettingsForm
from django.utils.translation import ugettext as _
from datetime import datetime
import json
def _get_filter_query(args):
    "Creates a query to filter Tasks based on FilterForm arguments"
    filters = Q()
    for name in args:
        value = args[name]
        # Only honour parameters that are real Task fields and non-empty.
        if value and hasattr(Task, name):
            filters &= Q(**{str(name + '__id'): long(value)})
    return filters
def _get_default_context(request):
    "Returns default context as a dict()"
    # Common context shared by the Projects views: visible projects and
    # statuses plus an empty mass-action form.
    return {'projects': Object.filter_by_request(request, Project.objects),
            'statuses': Object.filter_by_request(request, TaskStatus.objects),
            'massform': MassActionForm(request.user.get_profile())}
def _process_mass_form(f):
"Pre-process request to handle mass action form for Tasks and Milestones"
def wrap(request, *args, **kwargs):
"Wrap"
if 'massform' in request.POST:
for key in request.POST:
if 'mass-milestone' in key:
try:
milestone = Milestone.objects.get(pk=request.POST[key])
form = MassActionForm(
request.user.get_profile(), request.POST, instance=milestone)
if form.is_valid() and request.user.get_profile().has_permission(milestone, mode='w'):
form.save()
except Exception:
pass
for key in request.POST:
if 'mass-task' in key:
try:
task = Task.objects.get(pk=request.POST[key])
form = MassActionForm(
request.user.get_profile(), request.POST, instance=task)
if form.is_valid() and request.user.get_profile().has_permission(task, mode='w'):
form.save()
except Exception:
pass
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@handle_response_format
@treeio_login_required
@_process_mass_form
def index(request, response_format='html'):
    "Project Management index page"
    # Top-level tasks only; hide hidden statuses unless an explicit status
    # filter was requested.
    query = Q(parent__isnull=True)
    if not request.GET:
        query &= Q(status__hidden=False)
    else:
        if not ('status' in request.GET and request.GET['status']):
            query &= Q(status__hidden=False)
        query &= _get_filter_query(request.GET)
    context = _get_default_context(request)
    context.update({'milestones': Object.filter_by_request(
                        request, Milestone.objects.filter(status__hidden=False)),
                    'tasks': Object.filter_by_request(
                        request, Task.objects.filter(query)),
                    'filters': FilterForm(
                        request.user.get_profile(), '', request.GET)})
    return render_to_response('projects/index', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def index_owned(request, response_format='html'):
    "Tasks owned by current user"
    # Restrict to top-level tasks whose caller is the current user.
    query = Q(
        parent__isnull=True, caller__related_user=request.user.get_profile())
    if not request.GET:
        query &= Q(status__hidden=False)
    else:
        if not ('status' in request.GET and request.GET['status']):
            query &= Q(status__hidden=False)
        query &= _get_filter_query(request.GET)
    context = _get_default_context(request)
    context.update({'milestones': Object.filter_by_request(
                        request, Milestone.objects.filter(status__hidden=False)),
                    'tasks': Object.filter_by_request(
                        request, Task.objects.filter(query)),
                    'filters': FilterForm(
                        request.user.get_profile(), 'status', request.GET)})
    return render_to_response('projects/index_owned', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def index_assigned(request, response_format='html'):
    "Tasks assigned to current user"
    # Restrict to top-level tasks assigned to the current user.
    query = Q(parent__isnull=True, assigned=request.user.get_profile())
    if not request.GET:
        query &= Q(status__hidden=False)
    else:
        if not ('status' in request.GET and request.GET['status']):
            query &= Q(status__hidden=False)
        query &= _get_filter_query(request.GET)
    context = _get_default_context(request)
    context.update({'milestones': Object.filter_by_request(
                        request, Milestone.objects.filter(status__hidden=False)),
                    'tasks': Object.filter_by_request(
                        request, Task.objects.filter(query)),
                    'filters': FilterForm(
                        request.user.get_profile(), 'assigned', request.GET)})
    return render_to_response('projects/index_assigned', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def index_by_status(request, status_id, response_format='html'):
    "Sort tasks by status"
    status = get_object_or_404(TaskStatus, pk=status_id)
    if not request.user.get_profile().has_permission(status):
        return user_denied(request, message="You don't have access to this Task Status")
    query = Q(parent__isnull=True, status=status)
    if request.GET:
        query &= _get_filter_query(request.GET)
    # Milestones that have at least one task in this status.
    milestones = Object.filter_by_request(
        request, Milestone.objects.filter(task__status=status).distinct())
    context = _get_default_context(request)
    context.update({'milestones': milestones,
                    'tasks': Object.filter_by_request(
                        request, Task.objects.filter(query)),
                    'status': status,
                    'filters': FilterForm(
                        request.user.get_profile(), 'status', request.GET)})
    return render_to_response('projects/index_by_status', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def index_in_progress(request, response_format='html'):
    "A page with a list of tasks in progress"
    query = Q(parent__isnull=True) & Q(status__hidden=False)
    if request.GET:
        query &= _get_filter_query(request.GET)
    # Open time slots (started but not finished) mark work in progress.
    open_slots = Object.filter_by_request(
        request, TaskTimeSlot.objects.filter(time_from__isnull=False,
                                             time_to__isnull=True))
    context = _get_default_context(request)
    context.update({'milestones': Object.filter_by_request(
                        request, Milestone.objects.filter(status__hidden=False)),
                    'tasks': Object.filter_by_request(
                        request, Task.objects.filter(query)),
                    'filters': FilterForm(
                        request.user.get_profile(), 'status', request.GET),
                    'time_slots': open_slots})
    return render_to_response('projects/index_in_progress', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Projects
#
@handle_response_format
@treeio_login_required
def project_add(request, response_format='html'):
    "New project form"
    if not request.POST:
        form = ProjectForm(request.user.get_profile(), None)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(reverse('projects'))
    else:
        form = ProjectForm(request.user.get_profile(), None,
                           request.POST, instance=Project())
        if form.is_valid():
            new_project = form.save()
            new_project.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('projects_project_view', args=[new_project.id]))
        # Invalid submission: fall through and re-render the bound form.
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/project_add', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def project_add_typed(request, project_id=None, response_format='html'):
    "Project add to preselected parent project"
    parent_project = None
    if project_id:
        parent_project = get_object_or_404(Project, pk=project_id)
        # Without execute access the parent preselection is dropped.
        if not request.user.get_profile().has_permission(parent_project, mode='x'):
            parent_project = None
    if not request.POST:
        form = ProjectForm(request.user.get_profile(), project_id)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(reverse('projects'))
    else:
        form = ProjectForm(request.user.get_profile(), project_id,
                           request.POST, instance=Project())
        if form.is_valid():
            new_project = form.save()
            new_project.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('projects_project_view', args=[new_project.id]))
    context = _get_default_context(request)
    context.update({'form': form, 'project': parent_project})
    return render_to_response('projects/project_add_typed', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def project_view(request, project_id, response_format='html'):
    "Single project view page"
    project = get_object_or_404(Project, pk=project_id)
    profile = request.user.get_profile()
    if not profile.has_permission(project):
        return user_denied(request, message="You don't have access to this Project")
    # Top-level tasks of this project, hiding hidden statuses unless an
    # explicit status filter was requested.
    query = Q(parent__isnull=True, project=project)
    if not request.GET:
        query &= Q(status__hidden=False)
    else:
        if not ('status' in request.GET and request.GET['status']):
            query &= Q(status__hidden=False)
        query &= _get_filter_query(request.GET)
    form = None
    if profile.has_permission(project, mode='r'):
        if request.POST:
            record = UpdateRecord()
            record.record_type = 'manual'
            form = TaskRecordForm(profile, request.POST, instance=record)
            if form.is_valid():
                record = form.save()
                record.set_user_from_request(request)
                record.save()
                record.about.add(project)
                project.set_last_updated()
                return HttpResponseRedirect(
                    reverse('projects_project_view', args=[project.id]))
        else:
            form = TaskRecordForm(profile)
    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    # Progress = percentage of top-level tasks whose status is not active.
    all_tasks = Object.filter_by_request(
        request, Task.objects.filter(Q(parent__isnull=True, project=project)))
    tasks_progress = float(0)
    if all_tasks:
        done = sum(1 for item in all_tasks if not item.status.active)
        tasks_progress = round(float(done) / len(all_tasks) * 100, ndigits=1)
    milestones = Object.filter_by_request(
        request,
        Milestone.objects.filter(project=project).filter(status__hidden=False))
    context = _get_default_context(request)
    context.update({'project': project,
                    'milestones': milestones,
                    'tasks': tasks,
                    'tasks_progress': tasks_progress,
                    'record_form': form,
                    'subprojects': Project.objects.filter(parent=project),
                    'filters': FilterForm(profile, 'project', request.GET)})
    return render_to_response('projects/project_view', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def project_edit(request, project_id, response_format='html'):
    "Project edit page"
    project = get_object_or_404(Project, pk=project_id)
    if not request.user.get_profile().has_permission(project, mode='w'):
        return user_denied(request, message="You don't have access to this Project")
    if not request.POST:
        form = ProjectForm(request.user.get_profile(), None, instance=project)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(
            reverse('projects_project_view', args=[project.id]))
    else:
        form = ProjectForm(request.user.get_profile(), None,
                           request.POST, instance=project)
        if form.is_valid():
            project = form.save()
            return HttpResponseRedirect(
                reverse('projects_project_view', args=[project.id]))
    context = _get_default_context(request)
    context.update({'form': form, 'project': project})
    return render_to_response('projects/project_edit', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def project_delete(request, project_id, response_format='html'):
    "Project delete"
    project = get_object_or_404(Project, pk=project_id)
    if not request.user.get_profile().has_permission(project, mode='w'):
        return user_denied(request, message="You don't have access to this Project")
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft delete: move to trash instead of removing outright.
                project.trash = True
                project.save()
            else:
                project.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(
                reverse('projects_project_view', args=[project.id]))
    context = _get_default_context(request)
    context.update({'project': project})
    return render_to_response('projects/project_delete', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Milestones
#
@handle_response_format
@treeio_login_required
def milestone_add(request, response_format='html'):
    "New milestone form"
    if not request.POST:
        form = MilestoneForm(request.user.get_profile(), None)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(reverse('projects'))
    else:
        form = MilestoneForm(request.user.get_profile(), None,
                             request.POST, instance=Milestone())
        if form.is_valid():
            new_milestone = form.save()
            new_milestone.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('projects_milestone_view', args=[new_milestone.id]))
        # Invalid submission: fall through and re-render the bound form.
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/milestone_add', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def milestone_add_typed(request, project_id=None, response_format='html'):
    "Milestone add to preselected project"
    project = None
    if project_id:
        project = get_object_or_404(Project, pk=project_id)
        # Without execute access the project preselection is dropped.
        if not request.user.get_profile().has_permission(project, mode='x'):
            project = None
    if not request.POST:
        form = MilestoneForm(request.user.get_profile(), project_id)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(reverse('projects'))
    else:
        form = MilestoneForm(request.user.get_profile(), project_id,
                             request.POST, instance=Milestone())
        if form.is_valid():
            new_milestone = form.save()
            new_milestone.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('projects_milestone_view', args=[new_milestone.id]))
    context = _get_default_context(request)
    context.update({'form': form, 'project': project})
    return render_to_response('projects/milestone_add_typed', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def milestone_view(request, milestone_id, response_format='html'):
    "Single milestone view page"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    project = milestone.project
    profile = request.user.get_profile()
    if not profile.has_permission(milestone):
        return user_denied(request, message="You don't have access to this Milestone")
    # Top-level tasks of this milestone; hide hidden statuses unless an
    # explicit status filter was requested.
    query = Q(milestone=milestone, parent__isnull=True)
    if not request.GET:
        query &= Q(status__hidden=False)
    else:
        if not ('status' in request.GET and request.GET['status']):
            query &= Q(status__hidden=False)
        query &= _get_filter_query(request.GET)
    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    # Progress = percentage of top-level tasks whose status is not active.
    all_tasks = Object.filter_by_request(
        request, Task.objects.filter(Q(parent__isnull=True, milestone=milestone)))
    tasks_progress = float(0)
    if all_tasks:
        done = sum(1 for item in all_tasks if not item.status.active)
        tasks_progress = round(float(done) / len(all_tasks) * 100, ndigits=1)
    context = _get_default_context(request)
    context.update({'milestone': milestone,
                    'tasks': tasks,
                    'tasks_progress': tasks_progress,
                    'filters': FilterForm(profile, 'milestone', request.GET),
                    'project': project})
    return render_to_response('projects/milestone_view', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def milestone_edit(request, milestone_id, response_format='html'):
    "Milestone edit page"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    project = milestone.project
    if not request.user.get_profile().has_permission(milestone, mode='w'):
        return user_denied(request, message="You don't have access to this Milestone")
    if not request.POST:
        form = MilestoneForm(
            request.user.get_profile(), None, instance=milestone)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(
            reverse('projects_milestone_view', args=[milestone.id]))
    else:
        form = MilestoneForm(request.user.get_profile(), None,
                             request.POST, instance=milestone)
        if form.is_valid():
            milestone = form.save()
            return HttpResponseRedirect(
                reverse('projects_milestone_view', args=[milestone.id]))
    context = _get_default_context(request)
    context.update({'form': form,
                    'milestone': milestone,
                    'project': project})
    return render_to_response('projects/milestone_edit', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def milestone_delete(request, milestone_id, response_format='html'):
    "Milestone delete"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    project = milestone.project
    if not request.user.get_profile().has_permission(milestone, mode='w'):
        return user_denied(request, message="You don't have access to this Milestone")
    # Show the tasks that would be affected by the deletion.
    query = Q(milestone=milestone, parent__isnull=True)
    if request.GET:
        query &= _get_filter_query(request.GET)
    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft delete: move to trash instead of removing outright.
                milestone.trash = True
                milestone.save()
            else:
                milestone.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(
                reverse('projects_milestone_view', args=[milestone.id]))
    context = _get_default_context(request)
    context.update({'milestone': milestone,
                    'tasks': tasks,
                    'project': project})
    return render_to_response('projects/milestone_delete', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def milestone_set_status(request, milestone_id, status_id, response_format='html'):
    "Milestone quick set: Status"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    profile = request.user.get_profile()
    if not profile.has_permission(milestone, mode='x'):
        return user_denied(request, message="You don't have access to this Milestone")
    status = get_object_or_404(TaskStatus, pk=status_id)
    if not profile.has_permission(status):
        return user_denied(request, message="You don't have access to this Milestone Status")
    # Only persist when the status actually changes.
    if milestone.status != status:
        milestone.status = status
        milestone.save()
    return milestone_view(request, milestone_id, response_format)
#
# Tasks
#
@handle_response_format
@treeio_login_required
def task_add(request, response_format='html'):
    "New task form"
    if not request.POST:
        form = TaskForm(request.user.get_profile(), None, None, None)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(reverse('projects'))
    else:
        form = TaskForm(request.user.get_profile(), None, None, None,
                        request.POST, instance=Task())
        if form.is_valid():
            new_task = form.save()
            new_task.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('projects_task_view', args=[new_task.id]))
        # Invalid submission: fall through and re-render the bound form.
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/task_add', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def task_add_typed(request, project_id=None, response_format='html'):
    "Task add to preselected project"
    project = None
    if project_id:
        project = get_object_or_404(Project, pk=project_id)
        # Without execute access the project preselection is dropped.
        if not request.user.get_profile().has_permission(project, mode='x'):
            project = None
    if not request.POST:
        form = TaskForm(request.user.get_profile(), None, project_id, None)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(
            reverse('projects_project_view', args=[project.id]))
    else:
        form = TaskForm(request.user.get_profile(), None, project_id, None,
                        request.POST, instance=Task())
        if form.is_valid():
            new_task = form.save()
            new_task.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('projects_task_view', args=[new_task.id]))
    context = _get_default_context(request)
    context.update({'form': form,
                    'project': project})
    return render_to_response('projects/task_add_typed', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def task_add_to_milestone(request, milestone_id=None, response_format='html'):
    """Task add to preselected milestone (and its project).

    A resolvable, accessible milestone is required: previously a missing
    milestone_id or a denied permission left ``milestone`` as None and the
    unconditional ``milestone.project`` read crashed with AttributeError.
    """
    milestone = None
    if milestone_id:
        milestone = get_object_or_404(Milestone, pk=milestone_id)
        if not request.user.get_profile().has_permission(milestone, mode='x'):
            # Explicit denial instead of the former AttributeError crash.
            return user_denied(request, message="You don't have access to this Milestone")
    if milestone is None:
        # No milestone to attach the task to.
        raise Http404
    project = milestone.project
    project_id = project.id
    if request.POST:
        if 'cancel' not in request.POST:
            task = Task()
            form = TaskForm(request.user.get_profile(), None,
                            project_id, milestone_id, request.POST, instance=task)
            if form.is_valid():
                task = form.save()
                task.set_user_from_request(request)
                return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        else:
            return HttpResponseRedirect(reverse('projects_milestone_view', args=[milestone.id]))
    else:
        form = TaskForm(
            request.user.get_profile(), None, project_id, milestone_id)
    context = _get_default_context(request)
    context.update({'form': form,
                    'project': project,
                    'milestone': milestone})
    return render_to_response('projects/task_add_to_milestone', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def task_add_subtask(request, task_id=None, response_format='html'):
    """New subtask form.

    A resolvable, accessible parent task is required: previously a missing
    task_id or a denied permission left ``parent`` as None, and the redirect
    paths reading ``parent.id`` crashed with AttributeError.
    """
    parent = None
    if task_id:
        parent = get_object_or_404(Task, pk=task_id)
        if not request.user.get_profile().has_permission(parent, mode='x'):
            # Explicit denial instead of the former AttributeError crash.
            return user_denied(request, message="You don't have access to this Task")
    if parent is None:
        # No parent task to attach the subtask to.
        raise Http404
    if request.POST:
        if 'cancel' not in request.POST:
            task = Task()
            form = TaskForm(
                request.user.get_profile(), parent, None, None, request.POST, instance=task)
            if form.is_valid():
                task = form.save()
                task.set_user_from_request(request)
                return HttpResponseRedirect(reverse('projects_task_view', args=[parent.id]))
        else:
            return HttpResponseRedirect(reverse('projects_task_view', args=[parent.id]))
    else:
        form = TaskForm(request.user.get_profile(), parent, None, None)
    context = _get_default_context(request)
    context.update({'form': form,
                    'task': parent})
    return render_to_response('projects/task_add_subtask', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
@_process_mass_form
def task_view(request, task_id, response_format='html'):
    "Single task view page"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task):
        return user_denied(request, message="You don't have access to this Task")
    form = None
    if profile.has_permission(task, mode='x'):
        if request.POST:
            if 'add-work' in request.POST:
                return HttpResponseRedirect(
                    reverse('projects_task_time_slot_add', args=[task.id]))
            if 'start-work' in request.POST:
                return HttpResponseRedirect(
                    reverse('projects_task_view', args=[task.id]))
            # Otherwise treat the POST as a manual update record.
            record = UpdateRecord()
            record.record_type = 'manual'
            form = TaskRecordForm(profile, request.POST, instance=record)
            if form.is_valid():
                record = form.save()
                record.set_user_from_request(request)
                record.save()
                record.about.add(task)
                task.set_last_updated()
                return HttpResponseRedirect(
                    reverse('projects_task_view', args=[task.id]))
        else:
            form = TaskRecordForm(profile)
    context = _get_default_context(request)
    context.update({'task': task,
                    'subtasks': Object.filter_by_request(
                        request, Task.objects.filter(parent=task)),
                    'record_form': form,
                    'time_slots': Object.filter_by_request(
                        request, TaskTimeSlot.objects.filter(task=task))})
    # The project field makes no sense in the single-task mass form.
    if 'massform' in context and 'project' in context['massform'].fields:
        del context['massform'].fields['project']
    return render_to_response('projects/task_view', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def task_edit(request, task_id, response_format='html'):
    "Task edit page"
    task = get_object_or_404(Task, pk=task_id)
    if not request.user.get_profile().has_permission(task, mode='w'):
        return user_denied(request, message="You don't have access to this Task")
    if not request.POST:
        form = TaskForm(
            request.user.get_profile(), None, None, None, instance=task)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(
            reverse('projects_task_view', args=[task.id]))
    else:
        form = TaskForm(request.user.get_profile(), None, None, None,
                        request.POST, instance=task)
        if form.is_valid():
            task = form.save()
            return HttpResponseRedirect(
                reverse('projects_task_view', args=[task.id]))
    context = _get_default_context(request)
    context.update({'form': form,
                    'task': task})
    return render_to_response('projects/task_edit', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def task_delete(request, task_id, response_format='html'):
    "Task delete"
    task = get_object_or_404(Task, pk=task_id)
    if not request.user.get_profile().has_permission(task, mode='w'):
        return user_denied(request, message="You don't have access to this Task")
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft delete: move to trash instead of removing outright.
                task.trash = True
                task.save()
            else:
                task.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(
                reverse('projects_task_view', args=[task.id]))
    context = _get_default_context(request)
    context.update({'task': task,
                    'subtasks': Object.filter_by_request(
                        request, Task.objects.filter(parent=task)),
                    'time_slots': Object.filter_by_request(
                        request, TaskTimeSlot.objects.filter(task=task))})
    return render_to_response('projects/task_delete', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def task_set_status(request, task_id, status_id, response_format='html'):
    "Task quick set: Status"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='x'):
        return user_denied(request, message="You don't have access to this Task")
    status = get_object_or_404(TaskStatus, pk=status_id)
    if not profile.has_permission(status):
        return user_denied(request, message="You don't have access to this Task Status")
    # Only persist when the status actually changes.
    if task.status != status:
        task.status = status
        task.save()
    return task_view(request, task_id, response_format)
#
# Task Time Slots
#
@handle_response_format
@treeio_login_required
def task_time_slot_start(request, task_id, response_format='html'):
    "Start TaskTimeSlot for preselected Task"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='x'):
        return user_denied(request, message="You don't have access to this Task")
    # Only open a new slot if this user isn't already working on the task.
    if not task.is_being_done_by(profile):
        slot = TaskTimeSlot(task=task, time_from=datetime.now(), user=profile)
        slot.save()
        slot.set_user_from_request(request)
    return HttpResponseRedirect(reverse('projects_task_view', args=[task_id]))
@handle_response_format
@treeio_login_required
def task_time_slot_stop(request, slot_id, response_format='html'):
    "Stop TaskTimeSlot for preselected Task"
    slot = get_object_or_404(TaskTimeSlot, pk=slot_id)
    if not request.user.get_profile().has_permission(slot, mode='w'):
        return user_denied(request, message="You don't have access to this TaskTimeSlot")
    # Close the slot now, recording the submitted work details.
    if request.POST and 'stop' in request.POST:
        slot.time_to = datetime.now()
        slot.details = request.POST['details']
        slot.save()
    return HttpResponseRedirect(reverse('projects_task_view', args=[slot.task_id]))
@handle_response_format
@treeio_login_required
def task_time_slot_add(request, task_id, response_format='html'):
    "Time slot add to preselected task"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='x'):
        return user_denied(request, message="You don't have access to this Task")
    if not request.POST:
        form = TaskTimeSlotForm(profile, task_id)
    else:
        slot = TaskTimeSlot(task=task, time_to=datetime.now(), user=profile)
        form = TaskTimeSlotForm(profile, task_id, request.POST, instance=slot)
        if 'cancel' in request.POST:
            return HttpResponseRedirect(
                reverse('projects_task_view', args=[task.id]))
        if form.is_valid():
            slot = form.save()
            slot.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('projects_task_view', args=[task.id]))
        # Invalid submission: fall through and re-render the bound form.
    context = _get_default_context(request)
    context.update({'form': form,
                    'task': task,
                    'subtasks': Object.filter_by_request(
                        request, Task.objects.filter(parent=task)),
                    'time_slots': Object.filter_by_request(
                        request, TaskTimeSlot.objects.filter(task=task))})
    return render_to_response('projects/task_time_add', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def task_time_slot_view(request, time_slot_id, response_format='html'):
    "Task time slot view page"
    task_time_slot = get_object_or_404(TaskTimeSlot, pk=time_slot_id)
    task = task_time_slot.task
    profile = request.user.get_profile()
    # Access to either the slot or its task is sufficient.
    if not (profile.has_permission(task_time_slot) or profile.has_permission(task)):
        return user_denied(request, message="You don't have access to this Task Time Slot")
    context = _get_default_context(request)
    context.update({'task_time_slot': task_time_slot,
                    'task': task})
    return render_to_response('projects/task_time_view', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def task_time_slot_edit(request, time_slot_id, response_format='html'):
    """Task time slot edit page.

    'cancel' is honoured before form validation: previously it was only
    checked when the form failed to validate, so a cancel submission with
    otherwise-valid data was saved anyway (inconsistent with
    task_time_slot_add, which checks cancel first).
    """
    task_time_slot = get_object_or_404(TaskTimeSlot, pk=time_slot_id)
    task = task_time_slot.task
    if not request.user.get_profile().has_permission(task_time_slot, mode='w') \
            and not request.user.get_profile().has_permission(task, mode='w'):
        return user_denied(request, message="You don't have access to this Task Time Slot")
    if request.POST:
        if 'cancel' in request.POST:
            # Bail out before validating/saving anything.
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        form = TaskTimeSlotForm(
            request.user.get_profile(), None, request.POST, instance=task_time_slot)
        if form.is_valid():
            task_time_slot = form.save()
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
    else:
        form = TaskTimeSlotForm(
            request.user.get_profile(), None, instance=task_time_slot)
    context = _get_default_context(request)
    context.update({'form': form,
                    'task_time_slot': task_time_slot,
                    'task': task})
    return render_to_response('projects/task_time_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def task_time_slot_delete(request, time_slot_id, response_format='html'):
    """Delete (or trash) a task time slot after confirmation."""
    time_slot = get_object_or_404(TaskTimeSlot, pk=time_slot_id)
    parent_task = time_slot.task
    profile = request.user.get_profile()
    # Deleting requires write access to the slot or to its parent task.
    if not (profile.has_permission(time_slot, mode='w') or
            profile.has_permission(parent_task, mode='w')):
        return user_denied(request, message="You don't have access to this Task Time Slot")
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft-delete: move to trash instead of removing the row.
                time_slot.trash = True
                time_slot.save()
            else:
                time_slot.delete()
            return HttpResponseRedirect(reverse('projects_task_view', args=[parent_task.id]))
        elif 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_task_view', args=[parent_task.id]))
    context = _get_default_context(request)
    context.update({'task_time_slot': time_slot, 'task': parent_task})
    return render_to_response('projects/task_time_delete', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Task Statuses
#
@handle_response_format
@treeio_login_required
def task_status_add(request, response_format='html'):
    """Create a new TaskStatus (admin-only).

    GET renders an empty form; POST validates and saves the status, then
    redirects to the project index filtered by the new status.  A 'cancel'
    POST returns to the settings page.
    """
    if not request.user.get_profile().is_admin('treeio.projects'):
        return user_denied(request, message="You don't have administrator access to the Projects module")
    if request.POST:
        if 'cancel' not in request.POST:  # idiomatic membership test (was: not 'cancel' in ...)
            status = TaskStatus()
            form = TaskStatusForm(
                request.user.get_profile(), request.POST, instance=status)
            if form.is_valid():
                status = form.save()
                # Record which user created the status.
                status.set_user_from_request(request)
                return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
        else:
            return HttpResponseRedirect(reverse('projects_settings_view'))
    else:
        form = TaskStatusForm(request.user.get_profile())
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/status_add', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def task_status_edit(request, status_id, response_format='html'):
    """Edit an existing TaskStatus.

    Requires write permission on the status.  POST saves and redirects to
    the index filtered by this status; 'cancel' redirects without saving.
    """
    status = get_object_or_404(TaskStatus, pk=status_id)
    if not request.user.get_profile().has_permission(status, mode='w'):
        return user_denied(request, message="You don't have access to this Task Status")
    if request.POST:
        if 'cancel' not in request.POST:  # idiomatic membership test (was: not 'cancel' in ...)
            form = TaskStatusForm(
                request.user.get_profile(), request.POST, instance=status)
            if form.is_valid():
                status = form.save()
                return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
        else:
            return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
    else:
        form = TaskStatusForm(request.user.get_profile(), instance=status)
    context = _get_default_context(request)
    context.update({'form': form,
                    'status': status})
    return render_to_response('projects/status_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@treeio_login_required
def task_status_delete(request, status_id, response_format='html'):
    """Delete (or trash) a TaskStatus after confirmation."""
    status = get_object_or_404(TaskStatus, pk=status_id)
    profile = request.user.get_profile()
    if not profile.has_permission(status, mode='w'):
        return user_denied(request, message="You don't have access to this Task Status")
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft-delete: flag as trashed instead of removing the row.
                status.trash = True
                status.save()
            else:
                status.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        elif 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
    milestones = Object.filter_by_request(request, Milestone.objects)
    context = _get_default_context(request)
    context.update({'status': status, 'milestones': milestones})
    return render_to_response('projects/status_delete', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Settings
#
@handle_response_format
@treeio_login_required
def settings_view(request, response_format='html'):
    """Show Projects settings: the default task status and all statuses."""
    if not request.user.get_profile().is_admin('treeio.projects'):
        return user_denied(request, message="You don't have administrator access to the Projects module")
    # Resolve the configured default task status; fall back to None when the
    # setting is absent or points at a missing/trashed status.
    try:
        setting = ModuleSetting.get_for_module(
            'treeio.projects', 'default_task_status')[0]
        default_status = TaskStatus.objects.get(pk=long(setting.value), trash=False)
    except Exception:
        default_status = None
    context = _get_default_context(request)
    context.update({'default_task_status': default_status,
                    'statuses': TaskStatus.objects.filter(trash=False)})
    return render_to_response('projects/settings_view', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@treeio_login_required
def settings_edit(request, response_format='html'):
    """Edit Projects module settings (admin-only).

    POST saves the settings form and redirects back to the settings view;
    'cancel' redirects without saving; GET renders the unbound form.
    """
    if not request.user.get_profile().is_admin('treeio.projects'):
        return user_denied(request, message="You don't have administrator access to the Projects module")
    form = None
    if request.POST:
        if 'cancel' not in request.POST:  # idiomatic membership test (was: not 'cancel' in ...)
            form = SettingsForm(request.user.get_profile(), request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(reverse('projects_settings_view'))
        else:
            return HttpResponseRedirect(reverse('projects_settings_view'))
    else:
        form = SettingsForm(request.user.get_profile())
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/settings_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
#
# AJAX lookups
#
@treeio_login_required
def ajax_task_lookup(request, response_format='html'):
    """Return up to ten tasks whose names contain the search term."""
    tasks = []
    if request.GET and 'term' in request.GET:
        term = request.GET['term']
        tasks = Task.objects.filter(name__icontains=term)[:10]
    return render_to_response('projects/ajax_task_lookup',
                              {'tasks': tasks},
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Widgets
#
@treeio_login_required
def widget_tasks_assigned_to_me(request, response_format='html'):
    "A list of tasks assigned to current user"
    # Top-level tasks (no parent) whose status is not hidden.
    # NOTE(review): this query does not itself filter by assignee; presumably
    # Object.filter_by_request() restricts the queryset to objects visible
    # to / assigned to the requesting user -- confirm against its definition.
    query = Q(parent__isnull=True) & Q(status__hidden=False)
    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    return render_to_response('projects/widgets/tasks_assigned_to_me',
                              {'tasks': tasks},
                              context_instance=RequestContext(request), response_format=response_format)
#
# Gantt Chart
#
@treeio_login_required
def gantt_view(request, project_id, response_format='html'):
    """Render a Gantt chart for one project.

    Builds a JSON structure of the project's milestones with their dated,
    non-trashed tasks, plus an "Unclassified Tasks" group for dated tasks
    that have no milestone, and hands it to the template as ``jdata``.

    Fix: an unknown ``project_id`` previously raised IndexError (HTTP 500)
    because ``projects.filter(pk=project_id)[0]`` was indexed before the
    emptiness check could run; it now raises Http404 as intended.
    """
    projects = Project.objects.filter(trash=False)
    matching = projects.filter(pk=project_id)
    if not matching:
        raise Http404
    project = matching[0]
    ganttData = []
    # generate json
    milestones = Milestone.objects.filter(project=project).filter(trash=False)
    for milestone in milestones:
        # Only tasks with both start and end dates can be drawn on the chart.
        tasks = Task.objects.filter(milestone=milestone).filter(
            start_date__isnull=False).filter(end_date__isnull=False).filter(trash=False)
        series = []
        for task in tasks:
            # Truncate long names so chart labels stay readable.
            tlabel = (
                task.name[:30] + '..') if len(task.name) > 30 else task.name
            tn = '<a href="%s" class="popup-link">%s</a>' % (
                reverse('projects_task_view', args=[task.id]), tlabel)
            series.append({'id': task.id,
                           'name': tn,
                           'label': tlabel,
                           'start': task.start_date.date().isoformat(),
                           'end': task.end_date.date().isoformat()})
        mlabel = (
            milestone.name[:30] + '..') if len(milestone.name) > 30 else milestone.name
        mn = '<a href="%s" class="popup-link projects-milestone">%s</a>' % (
            reverse('projects_milestone_view', args=[milestone.id]), mlabel)
        a = {'id': milestone.id, 'name': mn, 'label': mlabel}
        if series:
            a['series'] = series
        else:
            a['series'] = []
        if milestone.start_date and milestone.end_date:
            a['start'] = milestone.start_date.date().isoformat()
            a['end'] = milestone.end_date.date().isoformat()
            a['color'] = '#E3F3D9'
        # Skip milestones with neither dated tasks nor their own date range.
        if series or (milestone.start_date and milestone.end_date):
            ganttData.append(a)
    # Dated tasks that belong to the project but have no milestone.
    unclassified = Task.objects.filter(project=project).filter(milestone__isnull=True).filter(
        start_date__isnull=False).filter(end_date__isnull=False).filter(trash=False)
    series = []
    for task in unclassified:
        tlabel = (task.name[:30] + '..') if len(task.name) > 30 else task.name
        tn = '<a href="%s" class="popup-link">%s</a>' % (
            reverse('projects_task_view', args=[task.id]), tlabel)
        series.append({'id': task.id,
                       'name': tn,
                       'label': tlabel,
                       'start': task.start_date.date().isoformat(),
                       'end': task.end_date.date().isoformat()})
    if series:
        ganttData.append(
            {'id': 0, 'name': _('Unclassified Tasks'), 'series': series})
    jdata = json.dumps(ganttData) if ganttData else None
    return render_to_response('projects/gantt_view',
                              {'jdata': jdata,
                               'project': project,
                               'projects': projects},
                              context_instance=RequestContext(request), response_format=response_format)
#@treeio_login_required
def task_ajax(request, response_format='html'):
    """Unfinished AJAX endpoint stub.

    Currently only dumps the incoming request (and its POST payload) to
    stdout for debugging.  The JSON response below is still commented out,
    so the view returns None -- presumably a work in progress; Django will
    error on a None response.  Note the auth decorator is also commented
    out, so this endpoint is unauthenticated -- confirm before enabling.
    """
    print request
    if request.POST:
        print request.POST
    # return HttpResponse(options,
    # mimetype=settings.HARDTREE_RESPONSE_FORMATS['json'])
|
|
<<<<<<< HEAD
<<<<<<< HEAD
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
from . import Framework
import github.NamedUser
class RawData(Framework.TestCase):
    """Tests for the ``raw_data`` property and ``create_from_raw_data``.

    Checks that ``raw_data`` returns the full API payload of an object,
    that accessing it completes a lazily-loaded object on demand, and that
    an object can be rebuilt from a captured raw-data dictionary.
    """

    # Full API payload for the user "jacquev6", used as the expected value.
    jacquev6RawData = {
        'disk_usage': 13812,
        'private_gists': 5,
        'public_repos': 21,
        'subscriptions_url': 'https://api.github.com/users/jacquev6/subscriptions',
        'gravatar_id': 'b68de5ae38616c296fa345d2b9df2225',
        'hireable': False,
        'id': 327146,
        'followers_url': 'https://api.github.com/users/jacquev6/followers',
        'following_url': 'https://api.github.com/users/jacquev6/following',
        'collaborators': 1,
        'total_private_repos': 4,
        'blog': 'http://vincent-jacques.net',
        'followers': 22,
        'location': 'Paris, France',
        'type': 'User',
        'email': 'vincent@vincent-jacques.net',
        'bio': '',
        'gists_url': 'https://api.github.com/users/jacquev6/gists{/gist_id}',
        'owned_private_repos': 4,
        'company': 'Criteo',
        'events_url': 'https://api.github.com/users/jacquev6/events{/privacy}',
        'html_url': 'https://github.com/jacquev6',
        'updated_at': '2013-03-12T22:13:32Z',
        'plan': {
            'collaborators': 1,
            'name': 'micro',
            'private_repos': 5,
            'space': 614400,
        },
        'received_events_url': 'https://api.github.com/users/jacquev6/received_events',
        'starred_url': 'https://api.github.com/users/jacquev6/starred{/owner}{/repo}',
        'public_gists': 2,
        'name': 'Vincent Jacques',
        'organizations_url': 'https://api.github.com/users/jacquev6/orgs',
        'url': 'https://api.github.com/users/jacquev6',
        'created_at': '2010-07-09T06:10:06Z',
        'avatar_url': 'https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png',
        'repos_url': 'https://api.github.com/users/jacquev6/repos',
        'following': 38,
        'login': 'jacquev6',
    }

    # Payload of the nested "plan" sub-object of the user above.
    planRawData = {
        'collaborators': 1,
        'name': 'micro',
        'private_repos': 5,
        'space': 614400,
    }

    def testCompletedObject(self):
        """raw_data on an already-completed object returns the full payload."""
        user = self.g.get_user("jacquev6")
        self.assertTrue(user._CompletableGithubObject__completed)
        self.assertEqual(user.raw_data, RawData.jacquev6RawData)

    def testNotYetCompletedObject(self):
        """Accessing raw_data completes a lazily-loaded object first."""
        user = self.g.get_user().get_repo("PyGithub").owner
        self.assertFalse(user._CompletableGithubObject__completed)
        self.assertEqual(user.raw_data, RawData.jacquev6RawData)
        self.assertTrue(user._CompletableGithubObject__completed)

    def testNonCompletableObject(self):
        """raw_data also works on plain (non-completable) attribute objects."""
        plan = self.g.get_user().plan
        self.assertEqual(plan.raw_data, RawData.planRawData)

    def testCreateObjectFromRawData(self):
        """create_from_raw_data rebuilds a completed object from a dict."""
        user = self.g.create_from_raw_data(github.NamedUser.NamedUser, RawData.jacquev6RawData)
        self.assertEqual(user._CompletableGithubObject__completed, True)
        self.assertEqual(user.name, "Vincent Jacques")
=======
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
from . import Framework
import github.NamedUser
class RawData(Framework.TestCase):
    """Tests for the ``raw_data`` property and ``create_from_raw_data``.

    Checks that ``raw_data`` returns the full API payload of an object,
    that accessing it completes a lazily-loaded object on demand, and that
    an object can be rebuilt from a captured raw-data dictionary.
    """

    # Full API payload for the user "jacquev6", used as the expected value.
    jacquev6RawData = {
        'disk_usage': 13812,
        'private_gists': 5,
        'public_repos': 21,
        'subscriptions_url': 'https://api.github.com/users/jacquev6/subscriptions',
        'gravatar_id': 'b68de5ae38616c296fa345d2b9df2225',
        'hireable': False,
        'id': 327146,
        'followers_url': 'https://api.github.com/users/jacquev6/followers',
        'following_url': 'https://api.github.com/users/jacquev6/following',
        'collaborators': 1,
        'total_private_repos': 4,
        'blog': 'http://vincent-jacques.net',
        'followers': 22,
        'location': 'Paris, France',
        'type': 'User',
        'email': 'vincent@vincent-jacques.net',
        'bio': '',
        'gists_url': 'https://api.github.com/users/jacquev6/gists{/gist_id}',
        'owned_private_repos': 4,
        'company': 'Criteo',
        'events_url': 'https://api.github.com/users/jacquev6/events{/privacy}',
        'html_url': 'https://github.com/jacquev6',
        'updated_at': '2013-03-12T22:13:32Z',
        'plan': {
            'collaborators': 1,
            'name': 'micro',
            'private_repos': 5,
            'space': 614400,
        },
        'received_events_url': 'https://api.github.com/users/jacquev6/received_events',
        'starred_url': 'https://api.github.com/users/jacquev6/starred{/owner}{/repo}',
        'public_gists': 2,
        'name': 'Vincent Jacques',
        'organizations_url': 'https://api.github.com/users/jacquev6/orgs',
        'url': 'https://api.github.com/users/jacquev6',
        'created_at': '2010-07-09T06:10:06Z',
        'avatar_url': 'https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png',
        'repos_url': 'https://api.github.com/users/jacquev6/repos',
        'following': 38,
        'login': 'jacquev6',
    }

    # Payload of the nested "plan" sub-object of the user above.
    planRawData = {
        'collaborators': 1,
        'name': 'micro',
        'private_repos': 5,
        'space': 614400,
    }

    def testCompletedObject(self):
        """raw_data on an already-completed object returns the full payload."""
        user = self.g.get_user("jacquev6")
        self.assertTrue(user._CompletableGithubObject__completed)
        self.assertEqual(user.raw_data, RawData.jacquev6RawData)

    def testNotYetCompletedObject(self):
        """Accessing raw_data completes a lazily-loaded object first."""
        user = self.g.get_user().get_repo("PyGithub").owner
        self.assertFalse(user._CompletableGithubObject__completed)
        self.assertEqual(user.raw_data, RawData.jacquev6RawData)
        self.assertTrue(user._CompletableGithubObject__completed)

    def testNonCompletableObject(self):
        """raw_data also works on plain (non-completable) attribute objects."""
        plan = self.g.get_user().plan
        self.assertEqual(plan.raw_data, RawData.planRawData)

    def testCreateObjectFromRawData(self):
        """create_from_raw_data rebuilds a completed object from a dict."""
        user = self.g.create_from_raw_data(github.NamedUser.NamedUser, RawData.jacquev6RawData)
        self.assertEqual(user._CompletableGithubObject__completed, True)
        self.assertEqual(user.name, "Vincent Jacques")
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
from . import Framework
import github.NamedUser
class RawData(Framework.TestCase):
    """Tests for the ``raw_data`` property and ``create_from_raw_data``.

    Checks that ``raw_data`` returns the full API payload of an object,
    that accessing it completes a lazily-loaded object on demand, and that
    an object can be rebuilt from a captured raw-data dictionary.
    """

    # Full API payload for the user "jacquev6", used as the expected value.
    jacquev6RawData = {
        'disk_usage': 13812,
        'private_gists': 5,
        'public_repos': 21,
        'subscriptions_url': 'https://api.github.com/users/jacquev6/subscriptions',
        'gravatar_id': 'b68de5ae38616c296fa345d2b9df2225',
        'hireable': False,
        'id': 327146,
        'followers_url': 'https://api.github.com/users/jacquev6/followers',
        'following_url': 'https://api.github.com/users/jacquev6/following',
        'collaborators': 1,
        'total_private_repos': 4,
        'blog': 'http://vincent-jacques.net',
        'followers': 22,
        'location': 'Paris, France',
        'type': 'User',
        'email': 'vincent@vincent-jacques.net',
        'bio': '',
        'gists_url': 'https://api.github.com/users/jacquev6/gists{/gist_id}',
        'owned_private_repos': 4,
        'company': 'Criteo',
        'events_url': 'https://api.github.com/users/jacquev6/events{/privacy}',
        'html_url': 'https://github.com/jacquev6',
        'updated_at': '2013-03-12T22:13:32Z',
        'plan': {
            'collaborators': 1,
            'name': 'micro',
            'private_repos': 5,
            'space': 614400,
        },
        'received_events_url': 'https://api.github.com/users/jacquev6/received_events',
        'starred_url': 'https://api.github.com/users/jacquev6/starred{/owner}{/repo}',
        'public_gists': 2,
        'name': 'Vincent Jacques',
        'organizations_url': 'https://api.github.com/users/jacquev6/orgs',
        'url': 'https://api.github.com/users/jacquev6',
        'created_at': '2010-07-09T06:10:06Z',
        'avatar_url': 'https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png',
        'repos_url': 'https://api.github.com/users/jacquev6/repos',
        'following': 38,
        'login': 'jacquev6',
    }

    # Payload of the nested "plan" sub-object of the user above.
    planRawData = {
        'collaborators': 1,
        'name': 'micro',
        'private_repos': 5,
        'space': 614400,
    }

    def testCompletedObject(self):
        """raw_data on an already-completed object returns the full payload."""
        user = self.g.get_user("jacquev6")
        self.assertTrue(user._CompletableGithubObject__completed)
        self.assertEqual(user.raw_data, RawData.jacquev6RawData)

    def testNotYetCompletedObject(self):
        """Accessing raw_data completes a lazily-loaded object first."""
        user = self.g.get_user().get_repo("PyGithub").owner
        self.assertFalse(user._CompletableGithubObject__completed)
        self.assertEqual(user.raw_data, RawData.jacquev6RawData)
        self.assertTrue(user._CompletableGithubObject__completed)

    def testNonCompletableObject(self):
        """raw_data also works on plain (non-completable) attribute objects."""
        plan = self.g.get_user().plan
        self.assertEqual(plan.raw_data, RawData.planRawData)

    def testCreateObjectFromRawData(self):
        """create_from_raw_data rebuilds a completed object from a dict."""
        user = self.g.create_from_raw_data(github.NamedUser.NamedUser, RawData.jacquev6RawData)
        self.assertEqual(user._CompletableGithubObject__completed, True)
        self.assertEqual(user.name, "Vincent Jacques")
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
|
#!/usr/bin/env python
# vim: sw=2 ts=2
import click
import os
import sys
@click.command()
### Cluster options
@click.option('--console-port', default='443', type=click.IntRange(1,65535), help='OpenShift web console port',
              show_default=True)
@click.option('--deployment-type', default='openshift-enterprise', help='OpenShift deployment type',
              show_default=True)
@click.option('--openshift-sdn', default='redhat/openshift-ovs-multitenant', type=click.Choice(['redhat/openshift-ovs-subnet', 'redhat/openshift-ovs-multitenant']), help='OpenShift SDN',
              show_default=True)
### AWS/EC2 options
@click.option('--glusterfs-stack-name', help='Specify a gluster stack name. Making the name unique will allow for multiple deployments',
              show_default=True)
@click.option('--region', default='us-east-1', help='ec2 region',
              show_default=True)
@click.option('--ami', default='ami-fbc89880', help='ec2 ami',
              show_default=True)
@click.option('--node-instance-type', default='m4.2xlarge', help='ec2 instance type',
              show_default=True)
@click.option('--use-cloudformation-facts', is_flag=True, help='Use cloudformation to populate facts. Requires Deployment >= OCP 3.5',
              show_default=True)
@click.option('--keypair', help='ec2 keypair name',
              show_default=True)
@click.option('--private-subnet-id1', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id2', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id3', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--glusterfs-volume-size', default='500', help='Gluster volume size in GB',
              show_default=True)
@click.option('--glusterfs-volume-type', default='st1', help='Gluster volume type',
              show_default=True)
@click.option('--iops', help='Specify the IOPS for a volume (used only with IO1)',
              show_default=True)
### DNS options
@click.option('--public-hosted-zone', help='hosted zone for accessing the environment')
### Subscription and Software options
@click.option('--rhsm-user', help='Red Hat Subscription Management User')
@click.option('--rhsm-password', help='Red Hat Subscription Management Password',
              hide_input=True,)
@click.option('--rhsm-pool', help='Red Hat Subscription Management Pool Name')
### Miscellaneous options
@click.option('--containerized', default='False', help='Containerized installation of OpenShift',
              show_default=True)
@click.option('--iam-role', help='Specify the name of the existing IAM Instance profile',
              show_default=True)
@click.option('--node-sg', help='Specify the already existing node security group id',
              show_default=True)
@click.option('--existing-stack', help='Specify the name of the existing CloudFormation stack')
@click.option('--no-confirm', is_flag=True,
              help='Skip confirmation prompt')
@click.help_option('--help', '-h')
@click.option('-v', '--verbose', count=True)
def launch_refarch_env(region=None,
                       ami=None,
                       no_confirm=False,
                       node_instance_type=None,
                       glusterfs_stack_name=None,
                       keypair=None,
                       public_hosted_zone=None,
                       deployment_type=None,
                       console_port=443,
                       rhsm_user=None,
                       rhsm_password=None,
                       rhsm_pool=None,
                       containerized=None,
                       node_type=None,
                       private_subnet_id1=None,
                       private_subnet_id2=None,
                       private_subnet_id3=None,
                       glusterfs_volume_type=None,
                       glusterfs_volume_size=None,
                       openshift_sdn=None,
                       iops=None,
                       node_sg=None,
                       iam_role=None,
                       existing_stack=None,
                       use_cloudformation_facts=False,
                       verbose=0):
    """Add GlusterFS (CNS) nodes to an existing OpenShift-on-AWS deployment.

    Prompts for any required values that were not supplied as options,
    echoes the chosen configuration for confirmation, then runs the
    infrastructure and add-node Ansible playbooks via ``os.system``.

    Fixes vs. the previous revision:
    - ``'\\openshift_sdn'`` was missing the tab escape and printed a literal
      backslash-o; now ``'\\topenshift_sdn'`` like every other line.
    - the ``keypair`` line was echoed twice in the non-CloudFormation branch.
    """
    # Need to prompt for the R53 zone:
    if public_hosted_zone is None:
        public_hosted_zone = click.prompt('Hosted DNS zone for accessing the environment')

    if existing_stack is None:
        existing_stack = click.prompt('Specify the name of the existing CloudFormation stack')

    if glusterfs_stack_name is None:
        glusterfs_stack_name = click.prompt('Specify a unique name for the CNS CloudFormation stack')

    # If no keypair is specified fail:
    if keypair is None:
        keypair = click.prompt('A SSH keypair must be specified or created')

    # If the user already provided values, don't bother asking again
    if deployment_type in ['openshift-enterprise'] and rhsm_user is None:
        rhsm_user = click.prompt("RHSM username?")
    if deployment_type in ['openshift-enterprise'] and rhsm_password is None:
        rhsm_password = click.prompt("RHSM password?", hide_input=True)
    if deployment_type in ['openshift-enterprise'] and rhsm_pool is None:
        rhsm_pool = click.prompt("RHSM Pool ID or Subscription Name for OpenShift?")

    # Prompt for vars if they are not defined; CloudFormation can compute most
    # of them when --use-cloudformation-facts is given.
    if use_cloudformation_facts and iam_role is None:
        iam_role = "Computed by Cloudformations"
    elif iam_role is None:
        iam_role = click.prompt("Specify the IAM Role of the node?")

    if use_cloudformation_facts and node_sg is None:
        node_sg = "Computed by Cloudformations"
    elif node_sg is None:
        node_sg = click.prompt("Specify the Security Group for the nodes?")

    if use_cloudformation_facts and private_subnet_id1 is None:
        private_subnet_id1 = "Computed by Cloudformations"
    elif private_subnet_id1 is None:
        private_subnet_id1 = click.prompt("Specify the first private subnet for the nodes?")

    if use_cloudformation_facts and private_subnet_id2 is None:
        private_subnet_id2 = "Computed by Cloudformations"
    elif private_subnet_id2 is None:
        private_subnet_id2 = click.prompt("Specify the second private subnet for the nodes?")

    if use_cloudformation_facts and private_subnet_id3 is None:
        private_subnet_id3 = "Computed by Cloudformations"
    elif private_subnet_id3 is None:
        private_subnet_id3 = click.prompt("Specify the third private subnet for the nodes?")

    # io1 volumes require an explicit IOPS figure; other types ignore it.
    if glusterfs_volume_type in ['io1']:
        iops = click.prompt('Specify a numeric value for iops')
    if iops is None:
        iops = "NA"

    # Hidden facts for infrastructure.yaml
    create_key = "no"
    create_vpc = "no"
    add_node = "yes"
    deploy_glusterfs = "true"
    node_type = "glusterfs"

    # Display information to the user about their choices
    if use_cloudformation_facts:
        click.echo('Configured values:')
        click.echo('\tami: %s' % ami)
        click.echo('\tregion: %s' % region)
        click.echo('\tglusterfs_stack_name: %s' % glusterfs_stack_name)
        click.echo('\tnode_instance_type: %s' % node_instance_type)
        click.echo('\tglusterfs_volume_type: %s' % glusterfs_volume_type)
        click.echo('\tglusterfs_volume_size: %s' % glusterfs_volume_size)
        click.echo('\tiops: %s' % iops)
        click.echo('\topenshift_sdn: %s' % openshift_sdn)
        click.echo('\tkeypair: %s' % keypair)
        click.echo('\tdeployment_type: %s' % deployment_type)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\tconsole port: %s' % console_port)
        click.echo('\trhsm_user: %s' % rhsm_user)
        click.echo('\trhsm_password: *******')
        click.echo('\trhsm_pool: %s' % rhsm_pool)
        click.echo('\tcontainerized: %s' % containerized)
        click.echo('\texisting_stack: %s' % existing_stack)
        click.echo('\tSubnets, Security Groups, and IAM Roles will be gather from the CloudFormation')
        click.echo("")
    else:
        click.echo('Configured values:')
        click.echo('\tami: %s' % ami)
        click.echo('\tregion: %s' % region)
        click.echo('\tglusterfs_stack_name: %s' % glusterfs_stack_name)
        click.echo('\tnode_instance_type: %s' % node_instance_type)
        click.echo('\tprivate_subnet_id1: %s' % private_subnet_id1)
        click.echo('\tprivate_subnet_id2: %s' % private_subnet_id2)
        click.echo('\tprivate_subnet_id3: %s' % private_subnet_id3)
        click.echo('\tglusterfs_volume_type: %s' % glusterfs_volume_type)
        click.echo('\tglusterfs_volume_size: %s' % glusterfs_volume_size)
        click.echo('\tiops: %s' % iops)
        click.echo('\topenshift_sdn: %s' % openshift_sdn)
        click.echo('\tkeypair: %s' % keypair)
        click.echo('\tnode_sg: %s' % node_sg)
        click.echo('\tdeployment_type: %s' % deployment_type)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\tconsole port: %s' % console_port)
        click.echo('\trhsm_user: %s' % rhsm_user)
        click.echo('\trhsm_password: *******')
        click.echo('\trhsm_pool: %s' % rhsm_pool)
        click.echo('\tcontainerized: %s' % containerized)
        click.echo('\tiam_role: %s' % iam_role)
        click.echo('\texisting_stack: %s' % existing_stack)
        click.echo("")

    if not no_confirm:
        click.confirm('Continue using these values?', abort=True)

    playbooks = ['playbooks/infrastructure.yaml', 'playbooks/add-node.yaml']

    for playbook in playbooks:

        # hide cache output unless in verbose mode
        devnull='> /dev/null'

        if verbose > 0:
            devnull=''

        # refresh the inventory cache to prevent stale hosts from
        # interfering with re-running
        command='inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull)
        os.system(command)

        # remove any cached facts to prevent stale data during a re-run
        command='rm -rf .ansible/cached_facts'
        os.system(command)

        if use_cloudformation_facts:
            command='ansible-playbook -i inventory/aws/hosts -e \'region=%s \
            ami=%s \
            keypair=%s \
            glusterfs_stack_name=%s \
            add_node=yes \
            node_instance_type=%s \
            public_hosted_zone=%s \
            deployment_type=%s \
            console_port=%s \
            rhsm_user=%s \
            rhsm_password=%s \
            rhsm_pool="%s" \
            containerized=%s \
            node_type=glusterfs \
            key_path=/dev/null \
            create_key=%s \
            create_vpc=%s \
            deploy_glusterfs=%s \
            glusterfs_volume_type=%s \
            glusterfs_volume_size=%s \
            iops=%s \
            openshift_sdn=%s \
            stack_name=%s \' %s' % (region,
                            ami,
                            keypair,
                            glusterfs_stack_name,
                            node_instance_type,
                            public_hosted_zone,
                            deployment_type,
                            console_port,
                            rhsm_user,
                            rhsm_password,
                            rhsm_pool,
                            containerized,
                            create_key,
                            create_vpc,
                            deploy_glusterfs,
                            glusterfs_volume_type,
                            glusterfs_volume_size,
                            iops,
                            openshift_sdn,
                            existing_stack,
                            playbook)
        else:
            command='ansible-playbook -i inventory/aws/hosts -e \'region=%s \
            ami=%s \
            keypair=%s \
            glusterfs_stack_name=%s \
            add_node=yes \
            node_sg=%s \
            node_instance_type=%s \
            private_subnet_id1=%s \
            private_subnet_id2=%s \
            private_subnet_id3=%s \
            public_hosted_zone=%s \
            deployment_type=%s \
            console_port=%s \
            rhsm_user=%s \
            rhsm_password=%s \
            rhsm_pool="%s" \
            containerized=%s \
            node_type=glusterfs \
            iam_role=%s \
            key_path=/dev/null \
            create_key=%s \
            create_vpc=%s \
            deploy_glusterfs=%s \
            glusterfs_volume_type=%s \
            glusterfs_volume_size=%s \
            iops=%s \
            openshift_sdn=%s \
            stack_name=%s \' %s' % (region,
                            ami,
                            keypair,
                            glusterfs_stack_name,
                            node_sg,
                            node_instance_type,
                            private_subnet_id1,
                            private_subnet_id2,
                            private_subnet_id3,
                            public_hosted_zone,
                            deployment_type,
                            console_port,
                            rhsm_user,
                            rhsm_password,
                            rhsm_pool,
                            containerized,
                            iam_role,
                            create_key,
                            create_vpc,
                            deploy_glusterfs,
                            glusterfs_volume_type,
                            glusterfs_volume_size,
                            iops,
                            openshift_sdn,
                            existing_stack,
                            playbook)

        if verbose > 0:
            command += " -" + "".join(['v']*verbose)
            click.echo('We are running: %s' % command)

        status = os.system(command)
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            return os.WEXITSTATUS(status)
if __name__ == '__main__':
    # Fail fast unless AWS credentials are exported in the environment.
    # check for AWS access info
    if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None:
        print('AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.')
        sys.exit(1)

    # Options may also be supplied via environment variables with this prefix
    # (click's auto_envvar mechanism), e.g. OSE_REFArch_REGION.
    # NOTE(review): the mixed-case prefix 'OSE_REFArch' looks like a typo for
    # 'OSE_REFARCH' -- confirm before changing, since changing it alters the
    # environment variable names users must export.
    launch_refarch_env(auto_envvar_prefix='OSE_REFArch')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.